118#define DEBUG_TYPE "arm-isel"
121STATISTIC(NumMovwMovt,
"Number of GAs materialized with movw + movt");
122STATISTIC(NumLoopByVals,
"Number of loops generated for byval arguments");
124 "Number of constants with their storage promoted into constant pools");
128 cl::desc(
"Enable / disable ARM interworking (for debugging only)"),
133 cl::desc(
"Enable / disable promotion of unnamed_addr constants into "
138 cl::desc(
"Maximum size of constant to promote into a constant pool"),
142 cl::desc(
"Maximum size of ALL constants to promote into a constant pool"),
147 ARM::R0, ARM::R1, ARM::R2, ARM::R3
150void ARMTargetLowering::addTypeForNEON(
MVT VT,
MVT PromotedLdStVT,
151 MVT PromotedBitwiseVT) {
152 if (VT != PromotedLdStVT) {
191 if (VT.
isInteger() && VT != PromotedBitwiseVT) {
214void ARMTargetLowering::addDRTypeForNEON(
MVT VT) {
219void ARMTargetLowering::addQRTypeForNEON(
MVT VT) {
224void ARMTargetLowering::setAllExpand(
MVT VT) {
237void ARMTargetLowering::addAllExtLoads(
const MVT From,
const MVT To,
238 LegalizeAction Action) {
244void ARMTargetLowering::addMVEVectorTypes(
bool HasMVEFP) {
247 for (
auto VT : IntTypes) {
277 for (
auto VT : FloatTypes) {
316 for (
auto VT : LongTypes) {
351 for (
int LCID = 0; LCID < RTLIB::UNKNOWN_LIBCALL; ++LCID)
361 static const struct {
363 const char *
const Name;
385 { RTLIB::UO_F32,
"__unordsf2vfp",
ISD::SETNE },
386 { RTLIB::O_F32,
"__unordsf2vfp",
ISD::SETEQ },
395 { RTLIB::UO_F64,
"__unorddf2vfp",
ISD::SETNE },
396 { RTLIB::O_F64,
"__unorddf2vfp",
ISD::SETEQ },
421 for (
const auto &LC : LibraryCalls) {
438 static const struct {
440 const char *
const Name;
527 for (
const auto &LC : LibraryCalls) {
537 static const struct {
539 const char *
const Name;
542 } MemOpsLibraryCalls[] = {
550 for (
const auto &LC : MemOpsLibraryCalls) {
560 static const struct {
562 const char *
const Name;
575 for (
const auto &LC : LibraryCalls) {
607 static const struct {
609 const char *
const Name;
617 for (
const auto &LC : LibraryCalls) {
651 addAllExtLoads(VT, InnerVT,
Expand);
1041 HasStandaloneRem =
false;
1046 const char *
const Name;
1048 } LibraryCalls[] = {
1060 for (
const auto &LC : LibraryCalls) {
1067 const char *
const Name;
1069 } LibraryCalls[] = {
1081 for (
const auto &LC : LibraryCalls) {
1123 InsertFencesForAtomic =
false;
1137 InsertFencesForAtomic =
true;
1144 InsertFencesForAtomic =
true;
1164 if (!InsertFencesForAtomic) {
1395std::pair<const TargetRegisterClass *, uint8_t>
1408 RRC = &ARM::DPRRegClass;
1418 RRC = &ARM::DPRRegClass;
1422 RRC = &ARM::DPRRegClass;
1426 RRC = &ARM::DPRRegClass;
1430 return std::make_pair(RRC, Cost);
1614 return &ARM::QQPRRegClass;
1616 return &ARM::QQQQPRRegClass;
1625 unsigned &PrefAlign)
const {
1626 if (!isa<MemIntrinsic>(CI))
1643 unsigned NumVals =
N->getNumValues();
1647 for (
unsigned i = 0; i != NumVals; ++i) {
1648 EVT VT =
N->getValueType(i);
1655 if (!
N->isMachineOpcode())
1679 if (
auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
1680 return Const->getZExtValue() == 16;
1687 if (
auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
1688 return Const->getZExtValue() == 16;
1695 if (
auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
1696 return Const->getZExtValue() == 16;
1706 return isSHL16(Op.getOperand(0));
1731 InvalidOnQNaN =
true;
1737 InvalidOnQNaN =
false;
1748 InvalidOnQNaN =
false;
1755 InvalidOnQNaN =
false;
1766 InvalidOnQNaN =
false;
1780 bool isVarArg)
const {
1817 bool isVarArg)
const {
1818 return CCAssignFnForNode(CC,
false, isVarArg);
1822 bool isVarArg)
const {
1823 return CCAssignFnForNode(CC,
true, isVarArg);
1830 bool isVarArg)
const {
1831 switch (getEffectiveCallingConv(CC, isVarArg)) {
1851SDValue ARMTargetLowering::LowerCallResult(
1863 for (
unsigned i = 0; i != RVLocs.
size(); ++i) {
1868 if (i == 0 && isThisReturn) {
1870 "unexpected return calling convention register assignment");
1880 Chain =
Lo.getValue(1);
1881 InFlag =
Lo.getValue(2);
1885 Chain =
Hi.getValue(1);
1886 InFlag =
Hi.getValue(2);
1898 Chain =
Lo.getValue(1);
1899 InFlag =
Lo.getValue(2);
1902 Chain =
Hi.getValue(1);
1903 InFlag =
Hi.getValue(2);
1942 Chain, dl,
Arg, PtrOff,
1948 RegsToPassVector &RegsToPass,
1955 unsigned id = Subtarget->
isLittle() ? 0 : 1;
1991 bool isStructRet = (Outs.
empty()) ?
false : Outs[0].Flags.
isSRet();
1992 bool isThisReturn =
false;
1993 auto Attr = MF.
getFunction().getFnAttribute(
"disable-tail-calls");
1994 bool PreferIndirect =
false;
2000 if (isa<GlobalAddressSDNode>(
Callee)) {
2004 auto *GV = cast<GlobalAddressSDNode>(
Callee)->getGlobal();
2009 return isa<Instruction>(U) &&
2010 cast<Instruction>(U)->getParent() == BB;
2016 isTailCall = IsEligibleForTailCallOptimization(
2017 Callee, CallConv, isVarArg, isStructRet,
2018 MF.
getFunction().hasStructRetAttr(), Outs, OutVals, Ins, DAG,
2022 "site marked musttail");
2036 unsigned NumBytes = CCInfo.getNextStackOffset();
2050 RegsToPassVector RegsToPass;
2055 for (
unsigned i = 0, realArgIdx = 0, e = ArgLocs.
size();
2057 ++i, ++realArgIdx) {
2061 bool isByVal = Flags.
isByVal();
2089 PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
2090 VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
2094 PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass,
2095 VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
2099 MemOpChains.
push_back(LowerMemOpCallTo(Chain, StackPtr, Op1,
2100 dl, DAG, VA, Flags));
2103 PassF64ArgInRegs(dl, DAG, Chain,
Arg, RegsToPass, VA, ArgLocs[++i],
2104 StackPtr, MemOpChains, Flags);
2110 "unexpected calling convention register assignment");
2112 "unexpected use of 'returned'");
2113 isThisReturn =
true;
2115 RegsToPass.push_back(std::make_pair(VA.
getLocReg(),
Arg));
2116 }
else if (isByVal) {
2118 unsigned offset = 0;
2122 unsigned ByValArgsCount = CCInfo.getInRegsParamsCount();
2123 unsigned CurByValIdx = CCInfo.getInRegsParamsProcessed();
2125 if (CurByValIdx < ByValArgsCount) {
2127 unsigned RegBegin, RegEnd;
2128 CCInfo.getInRegsParamInfo(CurByValIdx, RegBegin, RegEnd);
2133 for (i = 0, j = RegBegin; j < RegEnd; i++, j++) {
2140 RegsToPass.push_back(std::make_pair(j, Load));
2145 offset = RegEnd - RegBegin;
2147 CCInfo.nextInRegsParam();
2163 SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode};
2167 }
else if (!isTailCall) {
2170 MemOpChains.
push_back(LowerMemOpCallTo(Chain, StackPtr,
Arg,
2171 dl, DAG, VA, Flags));
2175 if (!MemOpChains.
empty())
2181 for (
unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
2183 RegsToPass[i].
second, InFlag);
2190 bool isDirect =
false;
2196 GV =
G->getGlobal();
2200 bool isARMFunc = !Subtarget->
isThumb() || (isStub && !Subtarget->
isMClass());
2201 bool isLocalARMFunc =
false;
2207 "long-calls codegen is not position independent!");
2211 if (isa<GlobalAddressSDNode>(
Callee)) {
2224 const char *Sym = S->getSymbol();
2230 ARMPCLabelIndex, 0);
2238 }
else if (isa<GlobalAddressSDNode>(
Callee)) {
2239 if (!PreferIndirect) {
2258 "Windows is the only supported COFF target");
2276 const char *Sym = S->getSymbol();
2281 ARMPCLabelIndex, 4);
2297 if ((!isDirect || isARMFunc) && !Subtarget->
hasV5TOps())
2302 if (!isDirect && !Subtarget->
hasV5TOps())
2313 std::vector<SDValue> Ops;
2314 Ops.push_back(Chain);
2319 for (
unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
2320 Ops.push_back(DAG.
getRegister(RegsToPass[i].first,
2321 RegsToPass[i].second.getValueType()));
2334 isThisReturn =
false;
2340 assert(Mask &&
"Missing call preserved mask for calling convention");
2345 Ops.push_back(InFlag);
2354 Chain = DAG.
getNode(CallOpc, dl, NodeTys, Ops);
2364 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
2365 InVals, isThisReturn,
2366 isThisReturn ? OutVals[0] :
SDValue());
2373void ARMTargetLowering::HandleByVal(
CCState *State,
unsigned &
Size,
2374 unsigned Align)
const {
2376 Align = std::max(Align, 4U);
2382 unsigned AlignInRegs =
Align / 4;
2383 unsigned Waste = (ARM::R4 -
Reg) % AlignInRegs;
2384 for (
unsigned i = 0; i < Waste; ++i)
2390 unsigned Excess = 4 * (ARM::R4 -
Reg);
2397 if (NSAAOffset != 0 &&
Size > Excess) {
2409 unsigned ByValRegBegin =
Reg;
2410 unsigned ByValRegEnd = std::min<unsigned>(Reg +
Size / 4, ARM::R4);
2414 for (
unsigned i = Reg + 1; i != ByValRegEnd; ++i)
2420 Size = std::max<int>(
Size - Excess, 0);
2430 unsigned Bytes =
Arg.getValueSizeInBits() / 8;
2431 int FI = std::numeric_limits<int>::max();
2433 unsigned VR = cast<RegisterSDNode>(
Arg.getOperand(1))->getReg();
2453 SDValue Ptr = Ld->getBasePtr();
2461 assert(FI != std::numeric_limits<int>::max());
2470bool ARMTargetLowering::IsEligibleForTailCallOptimization(
2472 bool isCalleeStructRet,
bool isCallerStructRet,
2476 const bool isIndirect)
const {
2487 (!isa<GlobalAddressSDNode>(
Callee.getNode()) || isIndirect))
2501 if (isCalleeStructRet || isCallerStructRet)
2515 (!
TT.isOSWindows() ||
TT.isOSBinFormatELF() ||
TT.isOSBinFormatMachO()))
2527 const uint32_t *CallerPreserved =
TRI->getCallPreservedMask(MF, CallerCC);
2528 if (CalleeCC != CallerCC) {
2529 const uint32_t *CalleePreserved =
TRI->getCallPreservedMask(MF, CalleeCC);
2530 if (!
TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
2543 if (!Outs.
empty()) {
2547 CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs,
C);
2549 if (CCInfo.getNextStackOffset()) {
2555 for (
unsigned i = 0, realArgIdx = 0, e = ArgLocs.
size();
2557 ++i, ++realArgIdx) {
2571 if (!ArgLocs[++i].isRegLoc())
2574 if (!ArgLocs[++i].isRegLoc())
2576 if (!ArgLocs[++i].isRegLoc())
2601 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
2610 StringRef IntKind =
F.getFnAttribute(
"interrupt").getValueAsString();
2623 if (IntKind ==
"" || IntKind ==
"IRQ" || IntKind ==
"FIQ" ||
2626 else if (IntKind ==
"SWI" || IntKind ==
"UNDEF")
2630 "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF");
2657 bool isLittleEndian = Subtarget->
isLittle();
2664 for (
unsigned i = 0, realRVLocIdx = 0;
2666 ++i, ++realRVLocIdx) {
2671 bool ReturnF16 =
false;
2714 HalfGPRs.
getValue(isLittleEndian ? 0 : 1),
2720 HalfGPRs.
getValue(isLittleEndian ? 1 : 0),
2735 fmrrd.
getValue(isLittleEndian ? 0 : 1),
2741 fmrrd.
getValue(isLittleEndian ? 1 : 0),
2787bool ARMTargetLowering::isUsedByReturnOnly(
SDNode *
N,
SDValue &Chain)
const {
2788 if (
N->getNumValues() != 1)
2790 if (!
N->hasNUsesOfValue(1, 0))
2800 TCChain =
Copy->getOperand(0);
2824 if (UI->getOperand(UI->getNumOperands()-1).getValueType() ==
MVT::Glue)
2832 if (!
Copy->hasOneUse())
2841 TCChain =
Copy->getOperand(0);
2846 bool HasRet =
false;
2862bool ARMTargetLowering::mayBeEmittedAsTailCall(
const CallInst *CI)
const {
2868 if (!CI->
isTailCall() || Attr.getValueAsString() ==
"true")
2878 SDValue WriteValue = Op->getOperand(2);
2882 &&
"LowerWRITE_REGISTER called for non-i64 type argument.");
2888 SDValue Ops[] = { Op->getOperand(0), Op->getOperand(1), Lo, Hi };
2900 EVT PtrVT =
Op.getValueType();
2912 auto T =
const_cast<Type*
>(
CP->getType());
2913 auto C =
const_cast<Constant*
>(
CP->getConstVal());
2924 return LowerGlobalAddress(GA, DAG);
2927 if (
CP->isMachineConstantPoolEntry())
2929 CP->getAlignment());
2932 CP->getAlignment());
2944 unsigned ARMPCLabelIndex = 0;
2947 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
2950 if (!IsPositionIndependent) {
2953 unsigned PCAdj = Subtarget->
isThumb() ? 4 : 8;
2964 if (!IsPositionIndependent)
2995ARMTargetLowering::LowerGlobalTLSAddressDarwin(
SDValue Op,
2998 "This function expects a Darwin target");
3003 SDValue DescAddr = LowerGlobalAddressDarwin(Op, DAG);
3040ARMTargetLowering::LowerGlobalTLSAddressWindows(
SDValue Op,
3084 const auto *GA = cast<GlobalAddressSDNode>(Op);
3100 unsigned char PCAdj = Subtarget->
isThumb() ? 4 : 8;
3130 std::pair<SDValue, SDValue> CallResult =
LowerCallTo(CLI);
3131 return CallResult.first;
3153 unsigned char PCAdj = Subtarget->
isThumb() ? 4 : 8;
3161 PtrVT, dl, Chain, Offset,
3163 Chain =
Offset.getValue(1);
3169 PtrVT, dl, Chain, Offset,
3179 PtrVT, dl, Chain, Offset,
3195 return LowerGlobalTLSAddressDarwin(Op, DAG);
3198 return LowerGlobalTLSAddressWindows(Op, DAG);
3207 return LowerToTLSGeneralDynamicModel(GA, DAG);
3210 return LowerToTLSExecModels(GA, DAG, model);
3219 for (
auto *U : V->
users())
3221 while (!Worklist.
empty()) {
3223 if (isa<ConstantExpr>(U)) {
3224 for (
auto *UU : U->users())
3229 auto *
I = dyn_cast<Instruction>(U);
3230 if (!
I ||
I->getParent()->getParent() !=
F)
3258 auto *GVar = dyn_cast<GlobalVariable>(GV);
3259 if (!GVar || !GVar->hasInitializer() ||
3260 !GVar->isConstant() || !GVar->hasGlobalUnnamedAddr() ||
3261 !GVar->hasLocalLinkage())
3266 auto *
Init = GVar->getInitializer();
3268 Init->needsRelocation())
3277 auto *CDAInit = dyn_cast<ConstantDataArray>(
Init);
3280 unsigned RequiredPadding = 4 - (
Size % 4);
3281 bool PaddingPossible =
3282 RequiredPadding == 4 || (CDAInit && CDAInit->isString());
3287 unsigned PaddedSize =
Size + ((RequiredPadding == 4) ? 0 : RequiredPadding);
3311 if (RequiredPadding != 4) {
3316 while (RequiredPadding--)
3329 ++NumConstpoolPromoted;
3334 if (
const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
3335 if (!(GV = GA->getBaseObject()))
3337 if (
const auto *V = dyn_cast<GlobalVariable>(GV))
3338 return V->isConstant();
3339 return isa<Function>(GV);
3347 return LowerGlobalAddressWindows(Op, DAG);
3349 return LowerGlobalAddressELF(Op, DAG);
3351 return LowerGlobalAddressDarwin(Op, DAG);
3359 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
3369 bool UseGOT_PREL = !
TM.shouldAssumeDSOLocal(*GV->
getParent(), GV);
3378 }
else if (Subtarget->
isROPI() && IsRO) {
3383 }
else if (Subtarget->
isRWPI() && !IsRO) {
3424 "ROPI/RWPI not currently supported for Darwin");
3427 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
3450 "Windows on ARM expects to use movw/movt");
3452 "ROPI/RWPI not currently supported for Windows");
3455 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
3459 else if (!
TM.shouldAssumeDSOLocal(*GV->
getParent(), GV))
3484 Op.getOperand(1), Val);
3494SDValue ARMTargetLowering::LowerEH_SJLJ_SETUP_DISPATCH(
SDValue Op,
3504 unsigned IntNo = cast<ConstantSDNode>(
Op.getOperand(0))->getZExtValue();
3508 case Intrinsic::thread_pointer: {
3512 case Intrinsic::eh_sjlj_lsda: {
3519 unsigned PCAdj = IsPositionIndependent ? (Subtarget->
isThumb() ? 4 : 8) : 0;
3529 if (IsPositionIndependent) {
3535 case Intrinsic::arm_neon_vabs:
3538 case Intrinsic::arm_neon_vmulls:
3539 case Intrinsic::arm_neon_vmullu: {
3540 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls)
3543 Op.getOperand(1),
Op.getOperand(2));
3545 case Intrinsic::arm_neon_vminnm:
3546 case Intrinsic::arm_neon_vmaxnm: {
3547 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminnm)
3550 Op.getOperand(1),
Op.getOperand(2));
3552 case Intrinsic::arm_neon_vminu:
3553 case Intrinsic::arm_neon_vmaxu: {
3554 if (
Op.getValueType().isFloatingPoint())
3556 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminu)
3559 Op.getOperand(1),
Op.getOperand(2));
3561 case Intrinsic::arm_neon_vmins:
3562 case Intrinsic::arm_neon_vmaxs: {
3564 if (!
Op.getValueType().isFloatingPoint()) {
3565 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins)
3568 Op.getOperand(1),
Op.getOperand(2));
3570 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins)
3573 Op.getOperand(1),
Op.getOperand(2));
3575 case Intrinsic::arm_neon_vtbl1:
3577 Op.getOperand(1),
Op.getOperand(2));
3578 case Intrinsic::arm_neon_vtbl2:
3580 Op.getOperand(1),
Op.getOperand(2),
Op.getOperand(3));
3587 ConstantSDNode *SSIDNode = cast<ConstantSDNode>(Op.getOperand(2));
3597 "Unexpected ISD::ATOMIC_FENCE encountered. Should be libcall!");
3627 return Op.getOperand(0);
3630 unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1;
3634 return Op.getOperand(0);
3636 unsigned isData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
3639 isRead = ~isRead & 1;
3640 isData = ~isData & 1;
3657 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3658 return DAG.
getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
3666 const SDLoc &dl)
const {
3672 RC = &ARM::tGPRRegClass;
3674 RC = &ARM::GPRRegClass;
3709 const Value *OrigArg,
3710 unsigned InRegsParamRecordIdx,
3711 int ArgOffset,
unsigned ArgSize)
const {
3726 unsigned RBegin, REnd;
3736 ArgOffset = -4 * (ARM::R4 - RBegin);
3746 for (
unsigned Reg = RBegin, i = 0;
Reg < REnd; ++
Reg, ++i) {
3755 if (!MemOps.
empty())
3764 unsigned TotalArgRegsSaveSize,
3765 bool ForceMutable)
const {
3774 int FrameIndex = StoreByValRegs(CCInfo, DAG, dl, Chain,
nullptr,
3777 std::max(4U, TotalArgRegsSaveSize));
3781SDValue ARMTargetLowering::LowerFormalArguments(
3799 unsigned CurArgIdx = 0;
3811 unsigned ArgRegBegin = ARM::R4;
3812 for (
unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3823 unsigned RBegin, REnd;
3825 ArgRegBegin = std::min(ArgRegBegin, RBegin);
3831 int lastInsIndex = -1;
3835 ArgRegBegin = std::min(ArgRegBegin, (
unsigned)
GPRArgRegs[RegIdx]);
3838 unsigned TotalArgRegsSaveSize = 4 * (ARM::R4 - ArgRegBegin);
3842 for (
unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3844 if (Ins[VA.
getValNo()].isOrigArg()) {
3845 std::advance(CurOrigArg,
3846 Ins[VA.
getValNo()].getOrigArgIndex() - CurArgIdx);
3857 SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i],
3868 ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i],
3873 ArgValue, ArgValue1,
3876 ArgValue, ArgValue2,
3879 ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
3885 RC = &ARM::HPRRegClass;
3887 RC = &ARM::SPRRegClass;
3889 RC = &ARM::DPRRegClass;
3891 RC = &ARM::QPRRegClass;
3894 : &ARM::GPRRegClass;
3934 if (index != lastInsIndex)
3943 assert(Ins[index].isOrigArg() &&
3944 "Byval arguments cannot be implicit");
3948 CCInfo, DAG, dl, Chain, &*CurOrigArg, CurByValIndex,
3963 lastInsIndex = index;
3970 VarArgStyleRegisters(CCInfo, DAG, dl, Chain,
3972 TotalArgRegsSaveSize);
3982 return CFP->getValueAPF().isPosZero();
3986 SDValue WrapperOp = Op.getOperand(1).getOperand(0);
3988 if (
const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
3989 return CFP->getValueAPF().isPosZero();
3995 SDValue BitcastOp = Op->getOperand(0);
4007 const SDLoc &dl)
const {
4009 unsigned C = RHSC->getZExtValue();
4087 bool InvalidOnQNaN)
const {
4102 unsigned Opc = Cmp.getOpcode();
4105 return DAG.
getNode(Opc, DL,
MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1));
4108 Cmp = Cmp.getOperand(0);
4109 Opc = Cmp.getOpcode();
4112 Cmp.getOperand(1), Cmp.getOperand(2));
4125std::pair<SDValue, SDValue>
4140 switch (
Op.getOpcode()) {
4192 return std::make_pair(
Value, OverflowCmp);
4203 std::tie(
Value, OverflowCmp) = getARMXALUOOp(Op, DAG, ARMcc);
4209 EVT VT =
Op.getValueType();
4212 ARMcc, CCR, OverflowCmp);
4220 SDLoc DL(BoolCarry);
4252 EVT VT =
Op.getValueType();
4256 switch (
Op.getOpcode()) {
4284 unsigned Opc = Cond.getOpcode();
4286 if (Cond.getResNo() == 1 &&
4294 std::tie(
Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc);
4296 EVT VT =
Op.getValueType();
4298 return getCMOV(dl, VT, SelectTrue, SelectFalse, ARMcc, CCR,
4307 if (Cond.getOpcode() ==
ARMISD::CMOV && Cond.hasOneUse()) {
4309 dyn_cast<ConstantSDNode>(Cond.getOperand(0));
4311 dyn_cast<ConstantSDNode>(Cond.getOperand(1));
4313 if (CMOVTrue && CMOVFalse) {
4319 if (CMOVTrueVal == 1 && CMOVFalseVal == 0) {
4321 False = SelectFalse;
4322 }
else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) {
4328 EVT VT =
Op.getValueType();
4329 SDValue ARMcc = Cond.getOperand(2);
4330 SDValue CCR = Cond.getOperand(3);
4331 SDValue Cmp = duplicateCmp(Cond.getOperand(4), DAG);
4333 return getCMOV(dl, VT, True, False, ARMcc, CCR, Cmp, DAG);
4349 bool &swpCmpOps,
bool &swpVselOps) {
4377 swpCmpOps = !swpCmpOps;
4378 swpVselOps = !swpVselOps;
4415 ARMcc, CCR, duplicateCmp(Cmp, DAG));
4442 ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal))) ||
4444 ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal)));
4452 ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal))) ||
4454 ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal)));
4475 uint64_t &K,
bool &usat) {
4476 SDValue LHS1 = Op.getOperand(0);
4477 SDValue RHS1 = Op.getOperand(1);
4478 SDValue TrueVal1 = Op.getOperand(2);
4479 SDValue FalseVal1 = Op.getOperand(3);
4480 ISD::CondCode CC1 = cast<CondCodeSDNode>(Op.getOperand(4))->get();
4482 const SDValue Op2 = isa<ConstantSDNode>(TrueVal1) ? FalseVal1 : TrueVal1;
4494 SDValue *K1 = isa<ConstantSDNode>(LHS1) ? &LHS1 : isa<ConstantSDNode>(RHS1)
4497 SDValue *K2 = isa<ConstantSDNode>(LHS2) ? &LHS2 : isa<ConstantSDNode>(RHS2)
4500 SDValue K2Tmp = isa<ConstantSDNode>(TrueVal2) ? TrueVal2 : FalseVal2;
4501 SDValue V1Tmp = (K1 && *K1 == LHS1) ? RHS1 : LHS1;
4502 SDValue V2Tmp = (K2 && *K2 == LHS2) ? RHS2 : LHS2;
4503 SDValue V2 = (K2Tmp == TrueVal2) ? FalseVal2 : TrueVal2;
4515 if (!K1 || !K2 || *K1 == Op2 || *K2 != K2Tmp || V1Tmp != V2Tmp ||
4533 if (!UpperCheckOp || !LowerCheckOp || LowerCheckOp == UpperCheckOp)
4539 int64_t Val1 = cast<ConstantSDNode>(*K1)->getSExtValue();
4540 int64_t Val2 = cast<ConstantSDNode>(*K2)->getSExtValue();
4541 int64_t PosVal = std::max(Val1, Val2);
4542 int64_t NegVal = std::min(Val1, Val2);
4544 if (((Val1 > Val2 && UpperCheckOp == &Op) ||
4545 (Val1 < Val2 && UpperCheckOp == &Op2)) &&
4551 else if (NegVal == 0)
4557 K = (uint64_t)PosVal;
4576 SDValue LHS = Op.getOperand(0);
4577 SDValue RHS = Op.getOperand(1);
4578 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
4579 SDValue TrueVal = Op.getOperand(2);
4580 SDValue FalseVal = Op.getOperand(3);
4582 SDValue *K = isa<ConstantSDNode>(LHS) ? &LHS : isa<ConstantSDNode>(RHS)
4590 SDValue KTmp = isa<ConstantSDNode>(TrueVal) ? TrueVal : FalseVal;
4591 V = (KTmp == TrueVal) ? FalseVal : TrueVal;
4592 SDValue VTmp = (K && *K == LHS) ? RHS : LHS;
4596 if (*K != KTmp || V != VTmp)
4607bool ARMTargetLowering::isUnsupportedFloatingType(
EVT VT)
const {
4618 EVT VT =
Op.getValueType();
4623 uint64_t SatConstant;
4696 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
4698 if (cast<ConstantSDNode>(ARMcc)->getZExtValue() ==
ARMCC::PL)
4700 return getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
4705 FPCCToARMCC(CC, CondCode, CondCode2, InvalidOnQNaN);
4716 bool swpCmpOps =
false;
4717 bool swpVselOps =
false;
4730 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl, InvalidOnQNaN);
4732 SDValue Result = getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
4736 SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl, InvalidOnQNaN);
4737 Result = getCMOV(dl, VT, Result, TrueVal, ARMcc2, CCR, Cmp2, DAG);
4747 if (!
N->hasOneUse())
4750 if (!
N->getNumValues())
4752 EVT VT = Op.getValueType();
4769 if (
LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op))
4771 Ld->getPointerInfo(), Ld->getAlignment(),
4772 Ld->getMemOperand()->getFlags());
4787 if (
LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) {
4788 SDValue Ptr = Ld->getBasePtr();
4791 Ld->getAlignment(), Ld->getMemOperand()->
getFlags());
4794 unsigned NewAlign =
MinAlign(Ld->getAlignment(), 4);
4798 Ld->getPointerInfo().getWithOffset(4), NewAlign,
4799 Ld->getMemOperand()->getFlags());
4817 bool LHSSeenZero =
false;
4819 bool RHSSeenZero =
false;
4821 if (LHSOk && RHSOk && (LHSSeenZero || RHSSeenZero)) {
4837 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
4840 Chain, Dest, ARMcc, CCR, Cmp);
4852 SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest };
4867 unsigned Opc = Cond.getOpcode();
4870 if (Cond.getResNo() == 1 &&
4880 std::tie(
Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc);
4932 std::tie(
Value, OverflowCmp) = getARMXALUOOp(LHS.
getValue(0), DAG, ARMcc);
4949 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
4952 Chain, Dest, ARMcc, CCR, Cmp);
4958 if (
SDValue Result = OptimizeVFPBrcond(Op, DAG))
4964 FPCCToARMCC(CC, CondCode, CondCode2, InvalidOnQNaN);
4967 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl, InvalidOnQNaN);
4970 SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp };
4998 Addr,
Op.getOperand(2), JTI);
5009 DAG.
getLoad(PTy, dl, Chain, Addr,
5017 EVT VT = Op.getValueType();
5020 if (Op.getValueType().getVectorElementType() ==
MVT::i32) {
5021 if (Op.getOperand(0).getValueType().getVectorElementType() ==
MVT::f32)
5026 const bool HasFullFP16 =
5030 const EVT OpTy = Op.getOperand(0).getValueType();
5043 Op = DAG.
getNode(Op.getOpcode(), dl, NewTy, Op.getOperand(0));
5048 EVT VT =
Op.getValueType();
5051 if (isUnsupportedFloatingType(
Op.getOperand(0).getValueType())) {
5060 false,
SDLoc(Op)).first;
5067 EVT VT = Op.getValueType();
5070 if (Op.getOperand(0).getValueType().getVectorElementType() ==
MVT::i32) {
5077 Op.getOperand(0).getValueType() ==
MVT::v8i16) &&
5078 "Invalid type for custom lowering!");
5080 const bool HasFullFP16 =
5095 switch (Op.getOpcode()) {
5107 Op = DAG.
getNode(CastOpc, dl, DestVecType, Op.getOperand(0));
5108 return DAG.
getNode(Opc, dl, VT, Op);
5112 EVT VT =
Op.getValueType();
5115 if (isUnsupportedFloatingType(VT)) {
5124 false,
SDLoc(Op)).first;
5135 EVT VT =
Op.getValueType();
5139 bool UseNEON = !InGPR && Subtarget->
hasNEON();
5220 EVT VT =
Op.getValueType();
5222 unsigned Depth = cast<ConstantSDNode>(
Op.getOperand(0))->getZExtValue();
5224 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
5243 EVT VT =
Op.getValueType();
5245 unsigned Depth = cast<ConstantSDNode>(
Op.getOperand(0))->getZExtValue();
5256unsigned ARMTargetLowering::getRegisterByName(
const char* RegName,
EVT VT,
5259 .
Case(
"sp", ARM::SP)
5275 &&
"ExpandREAD_REGISTER called for non-i64 type result.");
5284 Results.push_back(Read.getOperand(0));
5312 ConstantSDNode *Index = dyn_cast<ConstantSDNode>(Op.getOperand(1));
5318 const APInt &APIntIndex = Index->getAPIntValue();
5320 NewIndex *= APIntIndex;
5328 SDValue ExtractSrc = Op.getOperand(0);
5350 EVT SrcVT = Op.getValueType();
5351 EVT DstVT =
N->getValueType(0);
5352 const bool HasFullFP16 = Subtarget->
hasFullFP16();
5365 auto Move =
N->use_begin();
5369 SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1) };
5384 if (Op.getOperand(0).getValueType() ==
MVT::i32)
5407 auto ZeroExtend =
N->use_begin();
5409 ZeroExtend->getValueType(0) !=
MVT::i32)
5412 auto Copy = ZeroExtend->use_begin();
5477 assert(
Op.getNumOperands() == 3 &&
"Not a double-shift!");
5478 EVT VT =
Op.getValueType();
5497 SDValue LoBigShift = DAG.
getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
5505 ? DAG.
getNode(Opc, dl, VT, ShOpHi,
5521 assert(
Op.getNumOperands() == 3 &&
"Not a double-shift!");
5522 EVT VT =
Op.getValueType();
5578 EVT VT =
N->getValueType(0);
5625 if (!ST->hasV6T2Ops())
5634 EVT VT =
N->getValueType(0);
5637 assert(ST->hasNEON() &&
"Custom ctpop lowering requires NEON.");
5640 "Unexpected type for custom ctpop lowering");
5648 unsigned EltSize = 8;
5671 Op = Op.getOperand(0);
5673 APInt SplatBits, SplatUndef;
5674 unsigned SplatBitSize;
5677 !BVN->
isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs,
5679 SplatBitSize > ElementBits)
5690 assert(VT.
isVector() &&
"vector shift count is not a vector type");
5694 return (Cnt >= 0 && (isLong ? Cnt - 1 : Cnt) < ElementBits);
5705 assert(VT.
isVector() &&
"vector shift count is not a vector type");
5710 return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits / 2 : ElementBits));
5711 if (Cnt >= -(isNarrow ? ElementBits / 2 : ElementBits) && Cnt <= -1) {
5720 EVT VT =
N->getValueType(0);
5742 "unexpected vector shift opcode");
5744 if (
isVShiftRImm(
N->getOperand(1), VT,
false,
false, Cnt)) {
5745 unsigned VShiftOpc =
5747 return DAG.
getNode(VShiftOpc, dl, VT,
N->getOperand(0),
5753 EVT ShiftVT =
N->getOperand(1).getValueType();
5756 unsigned VShiftOpc =
5758 return DAG.
getNode(VShiftOpc, dl, VT,
N->getOperand(0), NegatedCount);
5763 EVT VT =
N->getValueType(0);
5772 "Unknown shift to lower!");
5774 unsigned ShOpc =
N->getOpcode();
5775 if (ST->hasMVEIntegerOps()) {
5812 Hi =
SDValue(Lo.getNode(), 1);
5821 if (ST->isThumb1Only())
5844 bool Invert =
false;
5848 SDValue Op0 = Op.getOperand(0);
5849 SDValue Op1 = Op.getOperand(1);
5850 SDValue CC = Op.getOperand(2);
5852 EVT VT = Op.getValueType();
5853 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
5870 Merged = DAG.
getNOT(dl, Merged, CmpVT);
5880 switch (SetCCOpcode) {
5921 switch (SetCCOpcode) {
5986 Result = DAG.
getNode(Opc, dl, CmpVT, Op0, Op1);
5989 Result = DAG.
getNode(Opc, dl, CmpVT, Op0, Op1);
5995 Result = DAG.
getNOT(dl, Result, VT);
6001 SDValue LHS = Op.getOperand(0);
6002 SDValue RHS = Op.getOperand(1);
6003 SDValue Carry = Op.getOperand(2);
6004 SDValue Cond = Op.getOperand(3);
6035 const SDLoc &dl,
EVT &VT,
bool is128Bits,
6037 unsigned OpCmode, Imm;
6047 switch (SplatBitSize) {
6052 assert((SplatBits & ~0xff) == 0 &&
"one byte splat value is too big");
6061 if ((SplatBits & ~0xff) == 0) {
6067 if ((SplatBits & ~0xff00) == 0) {
6070 Imm = SplatBits >> 8;
6081 if ((SplatBits & ~0xff) == 0) {
6087 if ((SplatBits & ~0xff00) == 0) {
6090 Imm = SplatBits >> 8;
6093 if ((SplatBits & ~0xff0000) == 0) {
6096 Imm = SplatBits >> 16;
6099 if ((SplatBits & ~0xff000000) == 0) {
6102 Imm = SplatBits >> 24;
6109 if ((SplatBits & ~0xffff) == 0 &&
6110 ((SplatBits | SplatUndef) & 0xff) == 0xff) {
6113 Imm = SplatBits >> 8;
6121 if ((SplatBits & ~0xffffff) == 0 &&
6122 ((SplatBits | SplatUndef) & 0xffff) == 0xffff) {
6125 Imm = SplatBits >> 16;
6140 uint64_t BitMask = 0xff;
6142 unsigned ImmMask = 1;
6144 for (
int ByteNum = 0; ByteNum < 8; ++ByteNum) {
6145 if (((SplatBits | SplatUndef) & BitMask) == BitMask) {
6148 }
else if ((SplatBits & BitMask) != 0) {
6157 Imm = ((Imm & 0xf) << 4) | ((Imm & 0xf0) >> 4);
6175 EVT VT =
Op.getValueType();
6182 if (
ST->genExecuteOnly()) {
6196 if (!
ST->isLittle())
6206 if (!
ST->hasVFP3Base())
6211 if (IsDouble && !Subtarget->
hasFP64())
6218 if (IsDouble || !
ST->useNEONForSinglePrecisionFP()) {
6236 if (!
ST->hasNEON() || (!IsDouble && !
ST->useNEONForSinglePrecisionFP()))
6245 if (IsDouble && (iVal & 0xffffffff) != (iVal >> 32))
6299 unsigned ExpectedElt = Imm;
6300 for (
unsigned i = 1; i < NumElts; ++i) {
6304 if (ExpectedElt == NumElts)
6307 if (M[i] < 0)
continue;
6308 if (ExpectedElt !=
static_cast<unsigned>(M[i]))
6316 bool &ReverseVEXT,
unsigned &Imm) {
6318 ReverseVEXT =
false;
6329 unsigned ExpectedElt = Imm;
6330 for (
unsigned i = 1; i < NumElts; ++i) {
6334 if (ExpectedElt == NumElts * 2) {
6339 if (M[i] < 0)
continue;
6340 if (ExpectedElt !=
static_cast<unsigned>(M[i]))
6356 "Only possible block sizes for VREV are: 16, 32, 64");
6363 unsigned BlockElts = M[0] + 1;
6371 for (
unsigned i = 0; i < NumElts; ++i) {
6372 if (M[i] < 0)
continue;
6373 if ((
unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts))
6384 return VT ==
MVT::v8i8 && M.size() == 8;
6389 if (Mask.size() == Elements * 2)
6390 return Index / Elements;
6391 return Mask[Index] == 0 ? 0 : 1;
6421 if (M.size() != NumElts && M.size() != NumElts*2)
6429 for (
unsigned i = 0; i < M.size(); i += NumElts) {
6431 for (
unsigned j = 0; j < NumElts; j += 2) {
6432 if ((M[i+j] >= 0 && (
unsigned) M[i+j] != j + WhichResult) ||
6433 (M[i+j+1] >= 0 && (
unsigned) M[i+j+1] != j + NumElts + WhichResult))
6438 if (M.size() == NumElts*2)
6453 if (M.size() != NumElts && M.size() != NumElts*2)
6456 for (
unsigned i = 0; i < M.size(); i += NumElts) {
6458 for (
unsigned j = 0; j < NumElts; j += 2) {
6459 if ((M[i+j] >= 0 && (
unsigned) M[i+j] != j + WhichResult) ||
6460 (M[i+j+1] >= 0 && (
unsigned) M[i+j+1] != j + WhichResult))
6465 if (M.size() == NumElts*2)
6485 if (M.size() != NumElts && M.size() != NumElts*2)
6488 for (
unsigned i = 0; i < M.size(); i += NumElts) {
6490 for (
unsigned j = 0; j < NumElts; ++j) {
6491 if (M[i+j] >= 0 && (
unsigned) M[i+j] != 2 * j + WhichResult)
6496 if (M.size() == NumElts*2)
6515 if (M.size() != NumElts && M.size() != NumElts*2)
6518 unsigned Half = NumElts / 2;
6519 for (
unsigned i = 0; i < M.size(); i += NumElts) {
6521 for (
unsigned j = 0; j < NumElts; j += Half) {
6522 unsigned Idx = WhichResult;
6523 for (
unsigned k = 0; k < Half; ++k) {
6524 int MIdx = M[i + j + k];
6525 if (MIdx >= 0 && (
unsigned) MIdx !=
Idx)
6532 if (M.size() == NumElts*2)
6556 if (M.size() != NumElts && M.size() != NumElts*2)
6559 for (
unsigned i = 0; i < M.size(); i += NumElts) {
6561 unsigned Idx = WhichResult * NumElts / 2;
6562 for (
unsigned j = 0; j < NumElts; j += 2) {
6563 if ((M[i+j] >= 0 && (
unsigned) M[i+j] !=
Idx) ||
6564 (M[i+j+1] >= 0 && (
unsigned) M[i+j+1] !=
Idx + NumElts))
6570 if (M.size() == NumElts*2)
6589 if (M.size() != NumElts && M.size() != NumElts*2)
6592 for (
unsigned i = 0; i < M.size(); i += NumElts) {
6594 unsigned Idx = WhichResult * NumElts / 2;
6595 for (
unsigned j = 0; j < NumElts; j += 2) {
6596 if ((M[i+j] >= 0 && (
unsigned) M[i+j] !=
Idx) ||
6597 (M[i+j+1] >= 0 && (
unsigned) M[i+j+1] !=
Idx))
6603 if (M.size() == NumElts*2)
6616 unsigned &WhichResult,
6619 if (
isVTRNMask(ShuffleMask, VT, WhichResult))
6621 if (
isVUZPMask(ShuffleMask, VT, WhichResult))
6623 if (
isVZIPMask(ShuffleMask, VT, WhichResult))
6641 if (NumElts != M.size())
6645 for (
unsigned i = 0; i != NumElts; ++i)
6646 if (M[i] >= 0 && M[i] != (
int) (NumElts - 1 - i))
6658 if (!isa<ConstantSDNode>(
N))
6660 Val = cast<ConstantSDNode>(
N)->getZExtValue();
6662 if (ST->isThumb1Only()) {
6663 if (Val <= 255 || ~Val <= 255)
6678 EVT VT =
Op.getValueType();
6680 APInt SplatBits, SplatUndef;
6681 unsigned SplatBitSize;
6683 if (BVN->
isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
6687 if ((
ST->hasNEON() && SplatBitSize <= 64) ||
6688 (
ST->hasMVEIntegerOps() && SplatBitSize <= 32)) {
6702 uint64_t NegatedImm = (~SplatBits).getZExtValue();
6731 bool isOnlyLowElement =
true;
6732 bool usesOnlyOneValue =
true;
6733 bool hasDominantValue =
false;
6740 for (
unsigned i = 0; i < NumElts; ++i) {
6745 isOnlyLowElement =
false;
6746 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
6749 ValueCounts.
insert(std::make_pair(V, 0));
6750 unsigned &Count = ValueCounts[V];
6753 if (++Count > (NumElts / 2)) {
6754 hasDominantValue =
true;
6758 if (ValueCounts.
size() != 1)
6759 usesOnlyOneValue =
false;
6760 if (!
Value.getNode() && !ValueCounts.
empty())
6763 if (ValueCounts.
empty())
6775 if (hasDominantValue && EltSize <= 32) {
6786 (constIndex = dyn_cast<ConstantSDNode>(
Value->getOperand(1)))) {
6791 if (VT !=
Value->getOperand(0).getValueType()) {
6804 if (!usesOnlyOneValue) {
6807 for (
unsigned I = 0;
I < NumElts; ++
I) {
6824 for (
unsigned i = 0; i < NumElts; ++i)
6829 Val = LowerBUILD_VECTOR(Val, DAG, ST);
6833 if (usesOnlyOneValue) {
6848 SDValue shuffle = ReconstructShuffle(Op, DAG);
6864 HVT, dl,
makeArrayRef(&Ops[NumElts / 2], NumElts / 2));
6874 if (EltSize >= 32) {
6880 for (
unsigned i = 0; i < NumElts; ++i)
6894 for (
unsigned i = 0 ; i < NumElts; ++i) {
6913 EVT VT =
Op.getValueType();
6916 struct ShuffleSourceInfo {
6918 unsigned MinElt = std::numeric_limits<unsigned>::max();
6919 unsigned MaxElt = 0;
6929 int WindowScale = 1;
6931 ShuffleSourceInfo(
SDValue Vec) : Vec(Vec), ShuffleVec(Vec) {}
6939 for (
unsigned i = 0; i < NumElts; ++i) {
6947 }
else if (!isa<ConstantSDNode>(V.
getOperand(1))) {
6956 if (Source == Sources.
end())
6960 unsigned EltNo = cast<ConstantSDNode>(V.
getOperand(1))->getZExtValue();
6967 if (Sources.
size() > 2)
6973 for (
auto &Source : Sources) {
6974 EVT SrcEltTy =
Source.Vec.getValueType().getVectorElementType();
6975 if (SrcEltTy.
bitsLT(SmallestEltTy))
6976 SmallestEltTy = SrcEltTy;
6978 unsigned ResMultiplier =
6986 for (
auto &Src : Sources) {
7012 if (Src.MaxElt - Src.MinElt >= NumSrcElts) {
7017 if (Src.MinElt >= NumSrcElts) {
7022 Src.WindowBase = -NumSrcElts;
7023 }
else if (Src.MaxElt < NumSrcElts) {
7040 Src.WindowBase = -Src.MinElt;
7047 for (
auto &Src : Sources) {
7049 if (SrcEltTy == SmallestEltTy)
7054 Src.WindowBase *= Src.WindowScale;
7067 if (
Entry.isUndef())
7071 int EltNo = cast<ConstantSDNode>(
Entry.getOperand(1))->getSExtValue();
7076 EVT OrigEltTy =
Entry.getOperand(0).getValueType().getVectorElementType();
7079 int LanesDefined = BitsDefined / BitsPerShuffleLane;
7083 int *LaneMask = &
Mask[i * ResMultiplier];
7085 int ExtractBase = EltNo * Src->WindowScale + Src->WindowBase;
7086 ExtractBase += NumElts * (Src - Sources.begin());
7087 for (
int j = 0; j < LanesDefined; ++j)
7088 LaneMask[j] = ExtractBase + j;
7097 assert(Sources.size() <= 2 &&
"Too many sources!");
7100 for (
unsigned i = 0; i < Sources.size(); ++i)
7127 unsigned OpNum = (PFEntry >> 26) & 0x0F;
7147 unsigned PFIndexes[4];
7148 for (
unsigned i = 0; i != 4; ++i) {
7152 PFIndexes[i] = M[i];
7156 unsigned PFTableIndex =
7157 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
7159 unsigned Cost = (PFEntry >> 30);
7165 bool ReverseVEXT, isV_UNDEF;
7166 unsigned Imm, WhichResult;
7169 if (EltSize >= 32 ||
7175 else if (Subtarget->
hasNEON() &&
7192 unsigned OpNum = (PFEntry >> 26) & 0x0F;
7193 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
7194 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1);
7197 if (LHSID == (1*9+2)*9+3)
return LHS;
7198 assert(LHSID == ((4*9+5)*9+6)*9+7 &&
"Illegal OP_COPY!");
7251 SDValue V1 = Op.getOperand(0);
7252 SDValue V2 = Op.getOperand(1);
7257 I = ShuffleMask.
begin(),
E = ShuffleMask.
end();
I !=
E; ++
I)
7260 if (V2.getNode()->isUndef())
7271 SDValue OpLHS = Op.getOperand(0);
7275 "Expect an v8i16/v16i8 type");
7280 unsigned ExtractNum = (VT ==
MVT::v16i8) ? 8 : 4;
7287 SDValue V1 = Op.getOperand(0);
7288 SDValue V2 = Op.getOperand(1);
7290 EVT VT = Op.getValueType();
7302 if (EltSize <= 32) {
7306 if (Lane == -1) Lane = 0;
7317 bool IsScalarToVector =
true;
7320 IsScalarToVector =
false;
7323 if (IsScalarToVector)
7330 bool ReverseVEXT =
false;
7332 if (ST->hasNEON() &&
isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) {
7356 unsigned WhichResult = 0;
7357 bool isV_UNDEF =
false;
7358 if (ST->hasNEON()) {
7360 ShuffleMask, VT, WhichResult, isV_UNDEF)) {
7391 }) &&
"Unexpected shuffle index into UNDEF operand!");
7394 ShuffleMask, SubVT, WhichResult, isV_UNDEF)) {
7397 assert((WhichResult == 0) &&
7398 "In-place shuffle of concat can only have one result!");
7411 unsigned PFIndexes[4];
7412 for (
unsigned i = 0; i != 4; ++i) {
7413 if (ShuffleMask[i] < 0)
7416 PFIndexes[i] = ShuffleMask[i];
7420 unsigned PFTableIndex =
7421 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
7423 unsigned Cost = (PFEntry >> 30);
7429 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
7430 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1);
7440 if (EltSize >= 32) {
7448 for (
unsigned i = 0; i < NumElts; ++i) {
7449 if (ShuffleMask[i] < 0)
7453 ShuffleMask[i] < (
int)NumElts ? V1 : V2,
7475 if (!isa<ConstantSDNode>(Lane))
7501 IVecIn, IElt, Lane);
7510 SDValue Lane = Op.getOperand(1);
7511 if (!isa<ConstantSDNode>(Lane))
7514 SDValue Vec = Op.getOperand(0);
7526 assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 &&
7527 "unexpected CONCAT_VECTORS");
7530 SDValue Op0 = Op.getOperand(0);
7531 SDValue Op1 = Op.getOperand(1);
7549 EVT VT =
N->getValueType(0);
7551 SDNode *BVN =
N->getOperand(0).getNode();
7556 unsigned HiElt = 1 - LoElt;
7561 if (!Lo0 || !Hi0 || !Lo1 || !Hi1)
7577 for (
unsigned i = 0, e =
N->getNumOperands(); i != e; ++i) {
7578 SDNode *Elt =
N->getOperand(i).getNode();
7581 unsigned HalfSize = EltSize / 2;
7583 if (!
isIntN(HalfSize,
C->getSExtValue()))
7586 if (!
isUIntN(HalfSize,
C->getZExtValue()))
7624 switch (OrigSimpleTy) {
7640 unsigned ExtOpcode) {
7663 if (ExtendedTy == LD->getMemoryVT())
7664 return DAG.
getLoad(LD->getMemoryVT(),
SDLoc(LD), LD->getChain(),
7665 LD->getBasePtr(), LD->getPointerInfo(),
7666 LD->getAlignment(), LD->getMemOperand()->getFlags());
7672 LD->getChain(), LD->getBasePtr(), LD->getPointerInfo(),
7673 LD->getMemoryVT(), LD->getAlignment(),
7674 LD->getMemOperand()->getFlags());
7686 N->getOperand(0)->getValueType(0),
7692 "Expected extending load");
7698 DAG.
getNode(Opcode,
SDLoc(newLoad), LD->getValueType(0), newLoad);
7707 SDNode *BVN =
N->getOperand(0).getNode();
7717 EVT VT =
N->getValueType(0);
7723 for (
unsigned i = 0; i != NumElts; ++i) {
7725 const APInt &CInt =
C->getAPIntValue();
7734 unsigned Opcode =
N->getOpcode();
7736 SDNode *N0 =
N->getOperand(0).getNode();
7737 SDNode *N1 =
N->getOperand(1).getNode();
7745 unsigned Opcode =
N->getOpcode();
7747 SDNode *N0 =
N->getOperand(0).getNode();
7748 SDNode *N1 =
N->getOperand(1).getNode();
7758 EVT VT = Op.getValueType();
7760 "unexpected type for custom-lowering ISD::MUL");
7761 SDNode *N0 = Op.getOperand(0).getNode();
7762 SDNode *N1 = Op.getOperand(1).getNode();
7763 unsigned NewOpc = 0;
7767 if (isN0SExt && isN1SExt)
7772 if (isN0ZExt && isN1ZExt)
7774 else if (isN1SExt || isN1ZExt) {
7808 "unexpected types for extended operands to VMULL");
7809 return DAG.
getNode(NewOpc, DL, VT, Op0, Op1);
7901 EVT VT = Op.getValueType();
7903 "unexpected type for custom-lowering ISD::SDIV");
7906 SDValue N0 = Op.getOperand(0);
7907 SDValue N1 = Op.getOperand(1);
7937 EVT VT = Op.getValueType();
7939 "unexpected type for custom-lowering ISD::UDIV");
7942 SDValue N0 = Op.getOperand(0);
7943 SDValue N1 = Op.getOperand(1);
8013 EVT VT =
N->getValueType(0);
8016 SDValue Carry = Op.getOperand(2);
8027 Op.getOperand(1), Carry);
8041 Op.getOperand(1), Carry);
8062 EVT ArgVT =
Arg.getValueType();
8074 bool ShouldUseSRet = Subtarget->
isAPCS_ABI();
8076 if (ShouldUseSRet) {
8078 const uint64_t ByteSize = DL.getTypeAllocSize(
RetTy);
8079 const unsigned StackAlign = DL.getPrefTypeAlignment(
RetTy);
8086 Entry.IsSExt =
false;
8087 Entry.IsZExt =
false;
8088 Entry.IsSRet =
true;
8096 Entry.IsSExt =
false;
8097 Entry.IsZExt =
false;
8101 (ArgVT ==
MVT::f64) ? RTLIB::SINCOS_STRET_F64 : RTLIB::SINCOS_STRET_F32;
8110 .setDiscardResult(ShouldUseSRet);
8111 std::pair<SDValue, SDValue> CallResult =
LowerCallTo(CLI);
8114 return CallResult.first;
8133 EVT VT =
Op.getValueType();
8135 "unexpected type for custom lowering DIV");
8141 const char *
Name =
nullptr;
8151 for (
auto AI : {1, 0}) {
8153 Arg.Node =
Op.getOperand(AI);
8158 CallLoweringInfo CLI(DAG);
8162 ES, std::move(Args));
8172ARMTargetLowering::BuildSDIVPow2(
SDNode *
N,
const APInt &Divisor,
8180 const bool MinSize =
ST.hasMinSize();
8181 const bool HasDivide =
ST.isThumb() ?
ST.hasDivideInThumbMode()
8182 :
ST.hasDivideInARMMode();
8186 if (
N->getOperand(0).getValueType().isVector())
8191 if (!(MinSize && HasDivide))
8204 if (Divisor.
sgt(128))
8213 "unexpected type for custom lowering DIV");
8219 return LowerWindowsDIVLibCall(Op, DAG,
Signed, DBZCHK);
8235void ARMTargetLowering::ExpandDIV_Windows(
8242 "unexpected type for custom lowering DIV");
8275 SDValue Ops[] = {
N->getOperand(0),
8304 const SDValue Ops[] = { RegClass, VLo, SubReg0, VHi, SubReg1 };
8313 "AtomicCmpSwap on types less than 64 should be legal");
8319 ARM::CMP_SWAP_64,
SDLoc(
N),
8341 "Custom lowering is MSVCRT specific!");
8344 SDValue Val = Op.getOperand(0);
8355 Entry.IsZExt =
true;
8356 Args.push_back(
Entry);
8360 Entry.IsZExt =
true;
8361 Args.push_back(
Entry);
8373 F.getReturnType() == LCRTy;
8382 std::pair<SDValue, SDValue> CI = TLI.
LowerCallTo(CLI);
8385 return !CI.second.getNode() ? DAG.
getRoot() : CI.first;
8390 switch (Op.getOpcode()) {
8421 case ISD::SREM:
return LowerREM(Op.getNode(), DAG);
8422 case ISD::UREM:
return LowerREM(Op.getNode(), DAG);
8441 return LowerDIV_Windows(Op, DAG,
true);
8445 return LowerDIV_Windows(Op, DAG,
false);
8451 return LowerSignedALUO(Op, DAG);
8454 return LowerUnsignedALUO(Op, DAG);
8462 return LowerDYNAMIC_STACKALLOC(Op, DAG);
8473 unsigned IntNo = cast<ConstantSDNode>(
N->getOperand(0))->getZExtValue();
8475 if (IntNo == Intrinsic::arm_smlald)
8477 else if (IntNo == Intrinsic::arm_smlaldx)
8479 else if (IntNo == Intrinsic::arm_smlsld)
8481 else if (IntNo == Intrinsic::arm_smlsldx)
8496 N->getOperand(1),
N->getOperand(2),
8508 switch (
N->getOpcode()) {
8524 Res = LowerREM(
N, DAG);
8528 Res = LowerDivRem(
SDValue(
N, 0), DAG);
8566 "ROPI/RWPI not currently supported with SjLj");
8576 bool isThumb2 = Subtarget->
isThumb2();
8579 unsigned PCAdj = (
isThumb || isThumb2) ? 4 : 8;
8585 : &ARM::GPRRegClass;
8603 unsigned NewVReg1 =
MRI->createVirtualRegister(TRC);
8604 BuildMI(*MBB,
MI, dl,
TII->get(ARM::t2LDRpci), NewVReg1)
8609 unsigned NewVReg2 =
MRI->createVirtualRegister(TRC);
8610 BuildMI(*MBB,
MI, dl,
TII->get(ARM::t2ORRri), NewVReg2)
8615 unsigned NewVReg3 =
MRI->createVirtualRegister(TRC);
8616 BuildMI(*MBB,
MI, dl,
TII->get(ARM::tPICADD), NewVReg3)
8633 unsigned NewVReg1 =
MRI->createVirtualRegister(TRC);
8634 BuildMI(*MBB,
MI, dl,
TII->get(ARM::tLDRpci), NewVReg1)
8638 unsigned NewVReg2 =
MRI->createVirtualRegister(TRC);
8639 BuildMI(*MBB,
MI, dl,
TII->get(ARM::tPICADD), NewVReg2)
8643 unsigned NewVReg3 =
MRI->createVirtualRegister(TRC);
8648 unsigned NewVReg4 =
MRI->createVirtualRegister(TRC);
8654 unsigned NewVReg5 =
MRI->createVirtualRegister(TRC);
8655 BuildMI(*MBB,
MI, dl,
TII->get(ARM::tADDframe), NewVReg5)
8669 unsigned NewVReg1 =
MRI->createVirtualRegister(TRC);
8675 unsigned NewVReg2 =
MRI->createVirtualRegister(TRC);
8699 : &ARM::GPRnopcRegClass;
8704 unsigned MaxCSNum = 0;
8707 if (!BB->isEHPad())
continue;
8712 II = BB->begin(), IE = BB->end(); II != IE; ++II) {
8713 if (!II->isEHLabel())
continue;
8715 MCSymbol *Sym = II->getOperand(0).getMCSymbol();
8720 CSI = CallSiteIdxs.
begin(),
CSE = CallSiteIdxs.
end();
8721 CSI !=
CSE; ++CSI) {
8722 CallSiteNumToLPad[*CSI].push_back(&*BB);
8723 MaxCSNum = std::max(MaxCSNum, *CSI);
8730 std::vector<MachineBasicBlock*> LPadList;
8732 LPadList.reserve(CallSiteNumToLPad.
size());
8733 for (
unsigned I = 1;
I <= MaxCSNum; ++
I) {
8736 II = MBBList.
begin(), IE = MBBList.
end(); II != IE; ++II) {
8737 LPadList.push_back(*II);
8738 InvokeBBs.
insert((*II)->pred_begin(), (*II)->pred_end());
8742 assert(!LPadList.empty() &&
8743 "No landing pad destinations for the dispatch jump table!");
8757 unsigned trap_opcode;
8759 trap_opcode = ARM::tTRAP;
8761 trap_opcode = Subtarget->
useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP;
8776 SetupEntryBlockForSjLj(
MI, MBB, DispatchBB, FI);
8783 MIB =
BuildMI(DispatchBB, dl,
TII->get(ARM::Int_eh_sjlj_dispatchsetup));
8795 unsigned NumLPads = LPadList.size();
8797 unsigned NewVReg1 =
MRI->createVirtualRegister(TRC);
8798 BuildMI(DispatchBB, dl,
TII->get(ARM::t2LDRi12), NewVReg1)
8804 if (NumLPads < 256) {
8805 BuildMI(DispatchBB, dl,
TII->get(ARM::t2CMPri))
8810 unsigned VReg1 =
MRI->createVirtualRegister(TRC);
8811 BuildMI(DispatchBB, dl,
TII->get(ARM::t2MOVi16), VReg1)
8812 .
addImm(NumLPads & 0xFFFF)
8815 unsigned VReg2 = VReg1;
8816 if ((NumLPads & 0xFFFF0000) != 0) {
8817 VReg2 =
MRI->createVirtualRegister(TRC);
8818 BuildMI(DispatchBB, dl,
TII->get(ARM::t2MOVTi16), VReg2)
8824 BuildMI(DispatchBB, dl,
TII->get(ARM::t2CMPrr))
8830 BuildMI(DispatchBB, dl,
TII->get(ARM::t2Bcc))
8835 unsigned NewVReg3 =
MRI->createVirtualRegister(TRC);
8836 BuildMI(DispContBB, dl,
TII->get(ARM::t2LEApcrelJT), NewVReg3)
8840 unsigned NewVReg4 =
MRI->createVirtualRegister(TRC);
8841 BuildMI(DispContBB, dl,
TII->get(ARM::t2ADDrs), NewVReg4)
8848 BuildMI(DispContBB, dl,
TII->get(ARM::t2BR_JT))
8852 }
else if (Subtarget->
isThumb()) {
8853 unsigned NewVReg1 =
MRI->createVirtualRegister(TRC);
8854 BuildMI(DispatchBB, dl,
TII->get(ARM::tLDRspi), NewVReg1)
8860 if (NumLPads < 256) {
8861 BuildMI(DispatchBB, dl,
TII->get(ARM::tCMPi8))
8876 unsigned VReg1 =
MRI->createVirtualRegister(TRC);
8877 BuildMI(DispatchBB, dl,
TII->get(ARM::tLDRpci))
8881 BuildMI(DispatchBB, dl,
TII->get(ARM::tCMPr))
8892 unsigned NewVReg2 =
MRI->createVirtualRegister(TRC);
8893 BuildMI(DispContBB, dl,
TII->get(ARM::tLSLri), NewVReg2)
8899 unsigned NewVReg3 =
MRI->createVirtualRegister(TRC);
8900 BuildMI(DispContBB, dl,
TII->get(ARM::tLEApcrelJT), NewVReg3)
8904 unsigned NewVReg4 =
MRI->createVirtualRegister(TRC);
8905 BuildMI(DispContBB, dl,
TII->get(ARM::tADDrr), NewVReg4)
8914 unsigned NewVReg5 =
MRI->createVirtualRegister(TRC);
8915 BuildMI(DispContBB, dl,
TII->get(ARM::tLDRi), NewVReg5)
8921 unsigned NewVReg6 = NewVReg5;
8922 if (IsPositionIndependent) {
8923 NewVReg6 =
MRI->createVirtualRegister(TRC);
8924 BuildMI(DispContBB, dl,
TII->get(ARM::tADDrr), NewVReg6)
8931 BuildMI(DispContBB, dl,
TII->get(ARM::tBR_JTr))
8935 unsigned NewVReg1 =
MRI->createVirtualRegister(TRC);
8936 BuildMI(DispatchBB, dl,
TII->get(ARM::LDRi12), NewVReg1)
8942 if (NumLPads < 256) {
8943 BuildMI(DispatchBB, dl,
TII->get(ARM::CMPri))
8948 unsigned VReg1 =
MRI->createVirtualRegister(TRC);
8949 BuildMI(DispatchBB, dl,
TII->get(ARM::MOVi16), VReg1)
8950 .
addImm(NumLPads & 0xFFFF)
8953 unsigned VReg2 = VReg1;
8954 if ((NumLPads & 0xFFFF0000) != 0) {
8955 VReg2 =
MRI->createVirtualRegister(TRC);
8956 BuildMI(DispatchBB, dl,
TII->get(ARM::MOVTi16), VReg2)
8962 BuildMI(DispatchBB, dl,
TII->get(ARM::CMPrr))
8977 unsigned VReg1 =
MRI->createVirtualRegister(TRC);
8978 BuildMI(DispatchBB, dl,
TII->get(ARM::LDRcp))
8983 BuildMI(DispatchBB, dl,
TII->get(ARM::CMPrr))
8994 unsigned NewVReg3 =
MRI->createVirtualRegister(TRC);
8995 BuildMI(DispContBB, dl,
TII->get(ARM::MOVsi), NewVReg3)
9000 unsigned NewVReg4 =
MRI->createVirtualRegister(TRC);
9001 BuildMI(DispContBB, dl,
TII->get(ARM::LEApcrelJT), NewVReg4)
9007 unsigned NewVReg5 =
MRI->createVirtualRegister(TRC);
9008 BuildMI(DispContBB, dl,
TII->get(ARM::LDRrs), NewVReg5)
9015 if (IsPositionIndependent) {
9016 BuildMI(DispContBB, dl,
TII->get(ARM::BR_JTadd))
9021 BuildMI(DispContBB, dl,
TII->get(ARM::BR_JTr))
9029 for (std::vector<MachineBasicBlock*>::iterator
9030 I = LPadList.begin(),
E = LPadList.end();
I !=
E; ++
I) {
9032 if (SeenMBBs.
insert(CurMBB).second)
9045 while (!Successors.empty()) {
9054 BB->normalizeSuccProbs();
9061 II = BB->rbegin(), IE = BB->rend(); II != IE; ++II) {
9062 if (!II->isCall())
continue;
9066 OI = II->operands_begin(), OE = II->operands_end();
9068 if (!OI->isReg())
continue;
9069 DefRegs[OI->getReg()] =
true;
9074 for (
unsigned i = 0; SavedRegs[i] != 0; ++i) {
9075 unsigned Reg = SavedRegs[i];
9077 !ARM::tGPRRegClass.contains(Reg) &&
9078 !ARM::hGPRRegClass.contains(Reg))
9080 if (Subtarget->
isThumb1Only() && !ARM::tGPRRegClass.contains(Reg))
9082 if (!Subtarget->
isThumb() && !ARM::GPRRegClass.contains(Reg))
9096 (*I)->setIsEHPad(
false);
9099 MI.eraseFromParent();
9113static unsigned getLdOpcode(
unsigned LdSize,
bool IsThumb1,
bool IsThumb2) {
9115 return LdSize == 16 ? ARM::VLD1q32wb_fixed
9116 : LdSize == 8 ? ARM::VLD1d32wb_fixed : 0;
9118 return LdSize == 4 ? ARM::tLDRi
9119 : LdSize == 2 ? ARM::tLDRHi
9120 : LdSize == 1 ? ARM::tLDRBi : 0;
9122 return LdSize == 4 ? ARM::t2LDR_POST
9123 : LdSize == 2 ? ARM::t2LDRH_POST
9124 : LdSize == 1 ? ARM::t2LDRB_POST : 0;
9125 return LdSize == 4 ? ARM::LDR_POST_IMM
9126 : LdSize == 2 ? ARM::LDRH_POST
9127 : LdSize == 1 ? ARM::LDRB_POST_IMM : 0;
9132static unsigned getStOpcode(
unsigned StSize,
bool IsThumb1,
bool IsThumb2) {
9134 return StSize == 16 ? ARM::VST1q32wb_fixed
9135 : StSize == 8 ? ARM::VST1d32wb_fixed : 0;
9137 return StSize == 4 ? ARM::tSTRi
9138 : StSize == 2 ? ARM::tSTRHi
9139 : StSize == 1 ? ARM::tSTRBi : 0;
9141 return StSize == 4 ? ARM::t2STR_POST
9142 : StSize == 2 ? ARM::t2STRH_POST
9143 : StSize == 1 ? ARM::t2STRB_POST : 0;
9144 return StSize == 4 ? ARM::STR_POST_IMM
9145 : StSize == 2 ? ARM::STRH_POST
9146 : StSize == 1 ? ARM::STRB_POST_IMM : 0;
9153 unsigned LdSize,
unsigned Data,
unsigned AddrIn,
9154 unsigned AddrOut,
bool IsThumb1,
bool IsThumb2) {
9155 unsigned LdOpc =
getLdOpcode(LdSize, IsThumb1, IsThumb2);
9156 assert(LdOpc != 0 &&
"Should have a load opcode");
9163 }
else if (IsThumb1) {
9169 BuildMI(*BB, Pos, dl,
TII->get(ARM::tADDi8), AddrOut)
9174 }
else if (IsThumb2) {
9194 unsigned StSize,
unsigned Data,
unsigned AddrIn,
9195 unsigned AddrOut,
bool IsThumb1,
bool IsThumb2) {
9196 unsigned StOpc =
getStOpcode(StSize, IsThumb1, IsThumb2);
9197 assert(StOpc != 0 &&
"Should have a store opcode");
9199 BuildMI(*BB, Pos, dl,
TII->get(StOpc), AddrOut)
9204 }
else if (IsThumb1) {
9211 BuildMI(*BB, Pos, dl,
TII->get(ARM::tADDi8), AddrOut)
9216 }
else if (IsThumb2) {
9217 BuildMI(*BB, Pos, dl,
TII->get(StOpc), AddrOut)
9223 BuildMI(*BB, Pos, dl,
TII->get(StOpc), AddrOut)
9242 unsigned dest =
MI.getOperand(0).getReg();
9243 unsigned src =
MI.getOperand(1).getReg();
9244 unsigned SizeVal =
MI.getOperand(2).getImm();
9245 unsigned Align =
MI.getOperand(3).getImm();
9250 unsigned UnitSize = 0;
9255 bool IsThumb2 = Subtarget->
isThumb2();
9256 bool IsThumb = Subtarget->
isThumb();
9260 }
else if (Align & 2) {
9264 if (!MF->
getFunction().hasFnAttribute(Attribute::NoImplicitFloat) &&
9266 if ((Align % 16 == 0) && SizeVal >= 16)
9268 else if ((Align % 8 == 0) && SizeVal >= 8)
9277 bool IsNeon = UnitSize >= 8;
9278 TRC = IsThumb ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
9280 VecTRC = UnitSize == 16 ? &ARM::DPairRegClass
9281 : UnitSize == 8 ? &ARM::DPRRegClass
9284 unsigned BytesLeft = SizeVal % UnitSize;
9285 unsigned LoopSize = SizeVal - BytesLeft;
9287 if (SizeVal <= Subtarget->getMaxInlineSizeThreshold()) {
9291 unsigned srcIn = src;
9292 unsigned destIn = dest;
9293 for (
unsigned i = 0; i < LoopSize; i+=UnitSize) {
9294 unsigned srcOut =
MRI.createVirtualRegister(TRC);
9295 unsigned destOut =
MRI.createVirtualRegister(TRC);
9296 unsigned scratch =
MRI.createVirtualRegister(IsNeon ? VecTRC : TRC);
9298 IsThumb1, IsThumb2);
9300 IsThumb1, IsThumb2);
9308 for (
unsigned i = 0; i < BytesLeft; i++) {
9309 unsigned srcOut =
MRI.createVirtualRegister(TRC);
9310 unsigned destOut =
MRI.createVirtualRegister(TRC);
9311 unsigned scratch =
MRI.createVirtualRegister(TRC);
9313 IsThumb1, IsThumb2);
9315 IsThumb1, IsThumb2);
9319 MI.eraseFromParent();
9354 unsigned varEnd =
MRI.createVirtualRegister(TRC);
9356 unsigned Vtmp = varEnd;
9357 if ((LoopSize & 0xFFFF0000) != 0)
9358 Vtmp =
MRI.createVirtualRegister(TRC);
9359 BuildMI(BB, dl,
TII->get(IsThumb ? ARM::t2MOVi16 : ARM::MOVi16), Vtmp)
9360 .
addImm(LoopSize & 0xFFFF)
9363 if ((LoopSize & 0xFFFF0000) != 0)
9364 BuildMI(BB, dl,
TII->get(IsThumb ? ARM::t2MOVTi16 : ARM::MOVTi16), varEnd)
9404 unsigned varLoop =
MRI.createVirtualRegister(TRC);
9405 unsigned varPhi =
MRI.createVirtualRegister(TRC);
9406 unsigned srcLoop =
MRI.createVirtualRegister(TRC);
9407 unsigned srcPhi =
MRI.createVirtualRegister(TRC);
9408 unsigned destLoop =
MRI.createVirtualRegister(TRC);
9409 unsigned destPhi =
MRI.createVirtualRegister(TRC);
9423 unsigned scratch =
MRI.createVirtualRegister(IsNeon ? VecTRC : TRC);
9425 IsThumb1, IsThumb2);
9427 IsThumb1, IsThumb2);
9439 TII->get(IsThumb2 ? ARM::t2SUBri : ARM::SUBri), varLoop);
9448 TII->get(IsThumb1 ? ARM::tBcc : IsThumb2 ? ARM::t2Bcc : ARM::Bcc))
9457 auto StartOfExit = exitMBB->
begin();
9461 unsigned srcIn = srcLoop;
9462 unsigned destIn = destLoop;
9463 for (
unsigned i = 0; i < BytesLeft; i++) {
9464 unsigned srcOut =
MRI.createVirtualRegister(TRC);
9465 unsigned destOut =
MRI.createVirtualRegister(TRC);
9466 unsigned scratch =
MRI.createVirtualRegister(TRC);
9467 emitPostLd(BB, StartOfExit,
TII, dl, 1, scratch, srcIn, srcOut,
9468 IsThumb1, IsThumb2);
9469 emitPostSt(BB, StartOfExit,
TII, dl, 1, scratch, destIn, destOut,
9470 IsThumb1, IsThumb2);
9475 MI.eraseFromParent();
9487 "__chkstk is only supported on Windows");
9488 assert(Subtarget->
isThumb2() &&
"Windows on ARM requires Thumb-2 mode");
9508 switch (
TM.getCodeModel()) {
9526 unsigned Reg =
MRI.createVirtualRegister(&ARM::rGPRRegClass);
9550 MI.eraseFromParent();
9569 BuildMI(TrapBB, DL,
TII->get(ARM::t__brkdiv0));
9582 MI.eraseFromParent();
9606 if (miI == BB->
end()) {
9609 sItr != sEnd; ++sItr) {
9618 SelectItr->addRegisterKilled(ARM::CPSR,
TRI);
9627 bool isThumb2 = Subtarget->
isThumb2();
9628 switch (
MI.getOpcode()) {
9635 case ARM::tLDR_postidx: {
9639 .
add(
MI.getOperand(2))
9640 .
add(
MI.getOperand(3))
9641 .
add(
MI.getOperand(4))
9642 .
add(
MI.getOperand(0))
9644 MI.eraseFromParent();
9651 case ARM::t2STR_preidx:
9652 MI.setDesc(
TII->get(ARM::t2STR_PRE));
9654 case ARM::t2STRB_preidx:
9655 MI.setDesc(
TII->get(ARM::t2STRB_PRE));
9657 case ARM::t2STRH_preidx:
9658 MI.setDesc(
TII->get(ARM::t2STRH_PRE));
9661 case ARM::STRi_preidx:
9662 case ARM::STRBi_preidx: {
9663 unsigned NewOpc =
MI.getOpcode() == ARM::STRi_preidx ? ARM::STR_PRE_IMM
9664 : ARM::STRB_PRE_IMM;
9666 unsigned Offset =
MI.getOperand(4).getImm();
9674 .
add(
MI.getOperand(0))
9675 .
add(
MI.getOperand(1))
9676 .
add(
MI.getOperand(2))
9678 .
add(
MI.getOperand(5))
9679 .
add(
MI.getOperand(6))
9681 MI.eraseFromParent();
9684 case ARM::STRr_preidx:
9685 case ARM::STRBr_preidx:
9686 case ARM::STRH_preidx: {
9688 switch (
MI.getOpcode()) {
9690 case ARM::STRr_preidx: NewOpc = ARM::STR_PRE_REG;
break;
9691 case ARM::STRBr_preidx: NewOpc = ARM::STRB_PRE_REG;
break;
9692 case ARM::STRH_preidx: NewOpc = ARM::STRH_PRE;
break;
9695 for (
unsigned i = 0; i <
MI.getNumOperands(); ++i)
9696 MIB.
add(
MI.getOperand(i));
9697 MI.eraseFromParent();
9701 case ARM::tMOVCCr_pseudo: {
9719 F->insert(It, copy0MBB);
9720 F->insert(It, sinkMBB);
9724 if (!
MI.killsRegister(ARM::CPSR) &&
9741 .
addReg(
MI.getOperand(4).getReg());
9761 MI.eraseFromParent();
9766 case ARM::BCCZi64: {
9772 bool RHSisZero =
MI.getOpcode() == ARM::BCCZi64;
9774 unsigned LHS1 =
MI.getOperand(1).getReg();
9775 unsigned LHS2 =
MI.getOperand(2).getReg();
9777 BuildMI(BB, dl,
TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
9781 BuildMI(BB, dl,
TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
9785 unsigned RHS1 =
MI.getOperand(3).getReg();
9786 unsigned RHS2 =
MI.getOperand(4).getReg();
9787 BuildMI(BB, dl,
TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
9791 BuildMI(BB, dl,
TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
9801 BuildMI(BB, dl,
TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
9810 MI.eraseFromParent();
9814 case ARM::Int_eh_sjlj_setjmp:
9815 case ARM::Int_eh_sjlj_setjmp_nofp:
9816 case ARM::tInt_eh_sjlj_setjmp:
9817 case ARM::t2Int_eh_sjlj_setjmp:
9818 case ARM::t2Int_eh_sjlj_setjmp_nofp:
9821 case ARM::Int_eh_sjlj_setup_dispatch:
9822 EmitSjLjDispatchBlock(
MI, BB);
9847 unsigned int ABSSrcReg =
MI.getOperand(1).getReg();
9848 unsigned int ABSDstReg =
MI.getOperand(0).getReg();
9849 bool ABSSrcKIll =
MI.getOperand(1).isKill();
9850 bool isThumb2 = Subtarget->
isThumb2();
9854 unsigned NewRsbDstReg =
9855 MRI.createVirtualRegister(isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass);
9869 BuildMI(BB, dl,
TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
9876 TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)).
addMBB(SinkBB)
9883 TII->get(isThumb2 ? ARM::t2RSBri : ARM::RSBri), NewRsbDstReg)
9892 TII->get(ARM::PHI), ABSDstReg)
9897 MI.eraseFromParent();
9902 case ARM::COPY_STRUCT_BYVAL_I32:
9904 return EmitStructByval(
MI, BB);
9905 case ARM::WIN__CHKSTK:
9906 return EmitLowered__chkstk(
MI, BB);
9907 case ARM::WIN__DBZCHK:
9908 return EmitLowered__dbzchk(
MI, BB);
9925 if (!
Node->hasAnyUseOfValue(0)) {
9926 MI.getOperand(0).setIsDead(
true);
9928 if (!
Node->hasAnyUseOfValue(1)) {
9929 MI.getOperand(1).setIsDead(
true);
9933 for (
unsigned I = 0;
I !=
MI.getOperand(4).getImm(); ++
I) {
9934 unsigned TmpReg =
MRI.createVirtualRegister(isThumb1 ? &ARM::tGPRRegClass
9935 : &ARM::GPRRegClass);
9942 if (
MI.getOpcode() == ARM::MEMCPY) {
9960 MCID = &
TII->get(NewOpc);
9963 MI.getDesc().getNumOperands() + 5 -
MI.getDesc().getSize()
9964 &&
"converted opcode should be the same except for cc_out"
9965 " (and, on Thumb1, pred)");
9975 MI.addOperand(
MI.getOperand(1));
9976 MI.RemoveOperand(1);
9980 for (
unsigned i =
MI.getNumOperands(); i--;) {
9982 if (
op.isReg() &&
op.isUse()) {
9985 MI.tieOperands(DefIdx, i);
10000 assert(!NewOpc &&
"Optional cc_out operand required");
10005 bool definesCPSR =
false;
10006 bool deadCPSR =
false;
10007 for (
unsigned i = MCID->
getNumOperands(), e =
MI.getNumOperands(); i != e;
10011 definesCPSR =
true;
10014 MI.RemoveOperand(i);
10018 if (!definesCPSR) {
10019 assert(!NewOpc &&
"Optional cc_out operand required");
10022 assert(deadCPSR == !Node->hasAnyUseOfValue(1) &&
"inconsistent dead flag");
10024 assert(!
MI.getOperand(ccOutIdx).getReg() &&
10025 "expect uninitialized optional cc_out operand");
10063 switch (
N->getOpcode()) {
10064 default:
return false;
10066 CC =
N->getOperand(0);
10088 EVT VT =
N->getValueType(0);
10089 CC =
N->getOperand(0);
10135 bool AllOnes =
false) {
10137 EVT VT =
N->getValueType(0);
10140 bool SwapSelectOps;
10142 NonConstantVal, DAG))
10148 OtherOp, NonConstantVal);
10154 CCOp, TrueVal, FalseVal);
10193 if (!
N->getValueType(0).is64BitVector())
10201 EVT VT =
N->getValueType(0);
10240 EVT VT =
N->getValueType(0);
10246 Opcode = Intrinsic::arm_neon_vpaddls;
10248 Opcode = Intrinsic::arm_neon_vpaddlu;
10276 EVT VT =
N->getValueType(0);
10291 unsigned nextIndex = 0;
10359 return DAG.
getNode(ExtOp, dl, VT, tmp);
10373 if (!Subtarget->
hasDSP())
10393 if (SRA.getOpcode() !=
ISD::SRA) {
10399 if (
auto Const = dyn_cast<ConstantSDNode>(SRA.getOperand(1))) {
10400 if (Const->getZExtValue() != 31)
10405 if (SRA.getOperand(0) != Mul)
10409 SDLoc dl(AddcNode);
10410 unsigned Opcode = 0;
10438 SDValue HiMLALResult(SMLAL.getNode(), 1);
10439 SDValue LoMLALResult(SMLAL.getNode(), 0);
10445 SDValue resNode(AddcNode, 0);
10474 "Expect an ADDE or SUBE");
10478 "ADDE node has the wrong inputs");
10497 "Expect ADDC with two result values. First: i32");
10517 bool IsLeftOperandMUL =
false;
10522 IsLeftOperandMUL =
true;
10533 SDValue *LowAddSub =
nullptr;
10536 if ((AddeSubeOp0 != MULOp.
getValue(1)) && (AddeSubeOp1 != MULOp.
getValue(1)))
10539 if (IsLeftOperandMUL)
10540 HiAddSub = &AddeSubeOp1;
10542 HiAddSub = &AddeSubeOp0;
10547 if (AddcSubcOp0 == MULOp.
getValue(0)) {
10548 LoMul = &AddcSubcOp0;
10549 LowAddSub = &AddcSubcOp1;
10551 if (AddcSubcOp1 == MULOp.
getValue(0)) {
10552 LoMul = &AddcSubcOp1;
10553 LowAddSub = &AddcSubcOp0;
10561 if (AddcSubcNode == HiAddSub->getNode() ||
10591 return SDValue(AddeSubeNode, 0);
10612 return SDValue(AddeSubeNode, 0);
10633 SDNode *UmlalNode =
nullptr;
10677 SDNode* AddcNode =
N->getOperand(2).getNode();
10678 SDNode* AddeNode =
N->getOperand(3).getNode();
10686 {N->getOperand(0), N->getOperand(1),
10687 AddcNode->getOperand(0), AddcNode->getOperand(1)});
10711 int32_t imm =
C->getSExtValue();
10712 if (imm < 0 && imm > std::numeric_limits<int>::min()) {
10717 return DAG.
getNode(Opcode, DL,
N->getVTList(),
N->getOperand(0), RHS);
10732 int64_t imm =
C->getSExtValue();
10743 return DAG.
getNode(Opcode, DL,
N->getVTList(),
10744 N->getOperand(0), RHS,
N->getOperand(2));
10829 if (
auto *Const = dyn_cast<ConstantSDNode>(N1->
getOperand(1))) {
10830 if (Const->getAPIntValue().ult(256))
10833 Const->getAPIntValue().sgt(-256))
10889 if (ST->isThumb() && ST->isThumb1Only())
10893 for (
auto U :
N->uses()) {
10894 switch(U->getOpcode()) {
10907 if (isa<ConstantSDNode>(U->getOperand(0)) ||
10908 isa<ConstantSDNode>(U->getOperand(1)))
10912 if (U->getOperand(0).getOpcode() ==
ISD::SHL ||
10913 U->getOperand(1).getOpcode() ==
ISD::SHL)
10923 if (
N->getOperand(0).getOpcode() !=
ISD::SHL)
10928 auto *C1ShlC2 = dyn_cast<ConstantSDNode>(
N->getOperand(1));
10929 auto *C2 = dyn_cast<ConstantSDNode>(SHL.getOperand(1));
10930 if (!C1ShlC2 || !C2)
10933 APInt C2Int = C2->getAPIntValue();
10934 APInt C1Int = C1ShlC2->getAPIntValue();
10939 if ((C1Int & Mask) != C1Int)
10946 auto LargeImm = [](
const APInt &Imm) {
10947 unsigned Zeros = Imm.countLeadingZeros() + Imm.countTrailingZeros();
10948 return Imm.getBitWidth() - Zeros > 8;
10951 if (LargeImm(C1Int) || LargeImm(C2Int))
10963 SHL.dump();
N->dump());
11041 EVT VT =
N->getValueType(0);
11045 return DAG.
getNode(Opcode, DL, VT,
11061 EVT VT =
N->getValueType(0);
11071 int64_t MulAmt =
C->getSExtValue();
11072 unsigned ShiftAmt = countTrailingZeros<uint64_t>(MulAmt);
11074 ShiftAmt = ShiftAmt & (32 - 1);
11079 MulAmt >>= ShiftAmt;
11101 uint64_t MulAmtAbs = -MulAmt;
11149 if (C1 == 255 || C1 == 65535)
11152 SDNode *N0 =
N->getOperand(0).getNode();
11166 if (!C2 || C2 >= 32)
11210 if (Trailing == C2 && C2 + C3 < 32) {
11223 if (Leading == C2 && C2 + C3 < 32) {
11243 EVT VT =
N->getValueType(0);
11249 APInt SplatBits, SplatUndef;
11250 unsigned SplatBitSize;
11252 if (BVN && Subtarget->
hasNEON() &&
11253 BVN->
isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
11254 if (SplatBitSize <= 64) {
11298 SRL =
OR->getOperand(1);
11299 SHL =
OR->getOperand(0);
11306 if ((SRL.getOperand(0).getNode() != SHL.getOperand(0).getNode()) ||
11310 SDNode *SMULLOHI = SRL.getOperand(0).getNode();
11311 if (SRL.getOperand(0) !=
SDValue(SMULLOHI, 0) ||
11312 SHL.getOperand(0) !=
SDValue(SMULLOHI, 1))
11331 unsigned Opcode = 0;
11332 if (
isS16(OpS16, DAG))
11353 EVT VT =
N->getValueType(0);
11381 if (Mask == 0xffff)
11388 if ((Val & ~Mask) != Val)
11413 (Mask == ~Mask2)) {
11416 if (Subtarget->
hasDSP() &&
11417 (Mask == 0xffff || Mask == 0xffff0000))
11430 (~Mask == Mask2)) {
11433 if (Subtarget->
hasDSP() &&
11434 (Mask2 == 0xffff || Mask2 == 0xffff0000))
11455 unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue();
11479 EVT VT =
N->getValueType(0);
11485 APInt SplatBits, SplatUndef;
11486 unsigned SplatBitSize;
11488 if (BVN && Subtarget->
hasNEON() &&
11489 BVN->
isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
11490 if (SplatBitSize <= 64) {
11527 unsigned SplatBitSize;
11530 APInt SplatBits0, SplatBits1;
11534 if (BVN0 && BVN0->
isConstantSplat(SplatBits0, SplatUndef, SplatBitSize,
11535 HasAnyUndefs) && !HasAnyUndefs) {
11536 if (BVN1 && BVN1->
isConstantSplat(SplatBits1, SplatUndef, SplatBitSize,
11537 HasAnyUndefs) && !HasAnyUndefs) {
11542 SplatBits0 == ~SplatBits1) {
11572 EVT VT =
N->getValueType(0);
11597 ToMask = ~cast<ConstantSDNode>(
N->getOperand(2))->getAPIntValue();
11603 isa<ConstantSDNode>(
From->getOperand(1))) {
11604 APInt Shift = cast<ConstantSDNode>(
From->getOperand(1))->getAPIntValue();
11617 unsigned LastActiveBitInA =
A.countTrailingZeros();
11618 unsigned FirstActiveBitInB =
B.getBitWidth() -
B.countLeadingZeros() - 1;
11619 return LastActiveBitInA - 1 == FirstActiveBitInB;
11625 APInt ToMask, FromMask;
11633 APInt CombinedToMask = ToMask;
11635 APInt NewToMask, NewFromMask;
11637 if (NewFrom !=
From) {
11639 CombinedToMask |= NewToMask;
11645 if ((NewToMask & CombinedToMask).getBoolValue())
11658 CombinedToMask |= NewToMask;
11675 unsigned InvMask = cast<ConstantSDNode>(
N->getOperand(2))->getZExtValue();
11679 static_cast<unsigned>(std::numeric_limits<unsigned>::digits) &&
11680 "undefined behavior");
11681 unsigned Mask = (1u << Width) - 1;
11683 if ((Mask & (~Mask2)) == 0)
11687 }
else if (
N->getOperand(0).getOpcode() ==
ARMISD::BFI) {
11696 APInt ToMask1, FromMask1;
11699 APInt ToMask2, FromMask2;
11707 APInt NewFromMask = FromMask1 | FromMask2;
11708 APInt NewToMask = ToMask1 | ToMask2;
11710 EVT VT =
N->getValueType(0);
11713 if (NewFromMask[0] == 0)
11729 SDValue InDouble =
N->getOperand(0);
11738 !cast<LoadSDNode>(InNode)->isVolatile()) {
11744 SDValue BasePtr = LD->getBasePtr();
11746 DAG.
getLoad(
MVT::i32, DL, LD->getChain(), BasePtr, LD->getPointerInfo(),
11747 LD->getAlignment(), LD->getMemOperand()->getFlags());
11753 LD->getPointerInfo().getWithOffset(4),
11754 std::min(4U, LD->getAlignment()),
11755 LD->getMemOperand()->getFlags());
11790 unsigned NumElts =
N->getValueType(0).getVectorNumElements();
11791 for (
unsigned i = 0; i < NumElts; ++i) {
11792 SDNode *Elt =
N->getOperand(i).getNode();
11809 if (
N->getNumOperands() == 2)
11815 EVT VT =
N->getValueType(0);
11821 for (
unsigned i = 0; i < NumElts; ++i) {
11847 EVT VT =
N->getValueType(0);
11860 Use->getValueType(0).isFloatingPoint())
11868 unsigned NumOfBitCastedElts = 0;
11870 unsigned NumOfRelevantElts = NumElts;
11871 for (
unsigned Idx = 0;
Idx < NumElts; ++
Idx) {
11876 ++NumOfBitCastedElts;
11877 }
else if (Elt.
isUndef() || isa<ConstantSDNode>(Elt))
11880 --NumOfRelevantElts;
11884 if (NumOfBitCastedElts <= NumOfRelevantElts / 2)
11902 for (
unsigned Idx = 0 ;
Idx < NumElts; ++
Idx) {
11930 EVT VT =
N->getValueType(0);
11931 SDNode *Elt =
N->getOperand(1).getNode();
11946 Vec, V,
N->getOperand(2));
11976 EVT VT =
N->getValueType(0);
11987 unsigned HalfElts = NumElts/2;
11989 for (
unsigned n = 0; n < NumElts; ++n) {
11992 if (MaskElt < (
int)HalfElts)
11994 else if (MaskElt >= (
int)NumElts && MaskElt < (
int)(NumElts + HalfElts))
11995 NewElt = HalfElts + MaskElt - NumElts;
12013 const unsigned AddrOpIdx = ((isIntrinsic ||
isStore) ? 2 : 1);
12014 SDValue Addr =
N->getOperand(AddrOpIdx);
12023 UI.getUse().getResNo() != Addr.
getResNo())
12039 bool isLoadOp =
true;
12040 bool isLaneOp =
false;
12041 unsigned NewOpc = 0;
12042 unsigned NumVecs = 0;
12044 unsigned IntNo = cast<ConstantSDNode>(
N->getOperand(1))->getZExtValue();
12048 NumVecs = 1;
break;
12050 NumVecs = 2;
break;
12052 NumVecs = 3;
break;
12054 NumVecs = 4;
break;
12055 case Intrinsic::arm_neon_vld2dup:
12056 case Intrinsic::arm_neon_vld3dup:
12057 case Intrinsic::arm_neon_vld4dup:
12062 NumVecs = 2; isLaneOp =
true;
break;
12064 NumVecs = 3; isLaneOp =
true;
break;
12066 NumVecs = 4; isLaneOp =
true;
break;
12068 NumVecs = 1; isLoadOp =
false;
break;
12070 NumVecs = 2; isLoadOp =
false;
break;
12072 NumVecs = 3; isLoadOp =
false;
break;
12074 NumVecs = 4; isLoadOp =
false;
break;
12076 NumVecs = 2; isLoadOp =
false; isLaneOp =
true;
break;
12078 NumVecs = 3; isLoadOp =
false; isLaneOp =
true;
break;
12080 NumVecs = 4; isLoadOp =
false; isLaneOp =
true;
break;
12084 switch (
N->getOpcode()) {
12091 NumVecs = 1; isLaneOp =
false;
break;
12093 NumVecs = 1; isLaneOp =
false; isLoadOp =
false;
break;
12100 VecTy =
N->getValueType(0);
12101 }
else if (isIntrinsic) {
12102 VecTy =
N->getOperand(AddrOpIdx+1).getValueType();
12104 assert(
isStore &&
"Node has to be a load, a store, or an intrinsic!");
12105 VecTy =
N->getOperand(1).getValueType();
12115 if (NumBytes >= 3 * 16 && (!CInc || CInc->
getZExtValue() != NumBytes)) {
12124 EVT AlignedVecTy = VecTy;
12141 if (isa<LSBaseSDNode>(
N)) {
12142 if (Alignment == 0)
12146 assert(NumVecs == 1 &&
"Unexpected multi-element generic load/store.");
12147 assert(!isLaneOp &&
"Unexpected generic load/store lane.");
12164 unsigned NumResultVecs = (isLoadOp ? NumVecs : 0);
12166 for (n = 0; n < NumResultVecs; ++n)
12167 Tys[n] = AlignedVecTy;
12184 for (
unsigned i = AddrOpIdx + 1; i <
N->getNumOperands() - 1; ++i)
12193 if (AlignedVecTy != VecTy &&
N->getOpcode() ==
ISD::STORE) {
12204 for (
unsigned i = 0; i < NumResultVecs; ++i)
12209 if (AlignedVecTy != VecTy &&
N->getOpcode() ==
ISD::LOAD) {
12210 SDValue &LdVal = NewResults[0];
12237 EVT VT =
N->getValueType(0);
12243 SDNode *VLD =
N->getOperand(0).getNode();
12246 unsigned NumVecs = 0;
12247 unsigned NewOpc = 0;
12248 unsigned IntNo = cast<ConstantSDNode>(VLD->
getOperand(1))->getZExtValue();
12249 if (IntNo == Intrinsic::arm_neon_vld2lane) {
12252 }
else if (IntNo == Intrinsic::arm_neon_vld3lane) {
12255 }
else if (IntNo == Intrinsic::arm_neon_vld4lane) {
12264 unsigned VLDLaneNo =
12265 cast<ConstantSDNode>(VLD->
getOperand(NumVecs+3))->getZExtValue();
12269 if (UI.getUse().getResNo() == NumVecs)
12273 VLDLaneNo != cast<ConstantSDNode>(
User->
getOperand(1))->getZExtValue())
12280 for (n = 0; n < NumVecs; ++n)
12293 unsigned ResNo = UI.getUse().
getResNo();
12295 if (ResNo == NumVecs)
12303 std::vector<SDValue> VLDDupResults;
12304 for (
unsigned n = 0; n < NumVecs; ++n)
12326 Op = Op.getOperand(0);
12331 unsigned EltSize = Op.getScalarValueSizeInBits();
12333 unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
12337 EVT VT =
N->getValueType(0);
12357 LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode());
12358 if (LD && Op.hasOneUse() && LD->isUnindexed() &&
12359 LD->getMemoryVT() ==
N->getValueType(0).getVectorElementType()) {
12360 SDValue Ops[] = { LD->getOperand(0), LD->getOperand(1),
12364 Ops, LD->getMemoryVT(),
12365 LD->getMemOperand());
12375 EVT VT =
N->getValueType(0);
12403 assert(StVT != VT &&
"Cannot truncate to the same type");
12412 if (0 != (NumElems * FromEltSz) % ToEltSz)
return SDValue();
12414 unsigned SizeRatio = FromEltSz / ToEltSz;
12419 NumElems*SizeRatio);
12425 for (
unsigned i = 0; i < NumElems; ++i)
12427 ? (i + 1) * SizeRatio - 1
12442 if (TLI.
isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToEltSz)
12461 for (
unsigned I = 0;
I <
E;
I++) {
12463 StoreType, ShuffWide,
12547 if (!Op.getValueType().isVector() || !Op.getValueType().isSimple() ||
12551 SDValue ConstVec = Op->getOperand(1);
12552 if (!isa<BuildVectorSDNode>(ConstVec))
12555 MVT FloatTy = Op.getSimpleValueType().getVectorElementType();
12557 MVT IntTy =
N->getSimpleValueType(0).getVectorElementType();
12559 unsigned NumLanes = Op.getValueType().getVectorNumElements();
12560 if (FloatBits != 32 || IntBits > 32 || (NumLanes != 4 && NumLanes != 2)) {
12571 if (
C == -1 ||
C == 0 ||
C > 32)
12576 unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs :
12577 Intrinsic::arm_neon_vcvtfp2fxu;
12583 if (IntBits < FloatBits)
12604 unsigned OpOpcode = Op.getNode()->getOpcode();
12605 if (!
N->getValueType(0).isVector() || !
N->getValueType(0).isSimple() ||
12609 SDValue ConstVec =
N->getOperand(1);
12610 if (!isa<BuildVectorSDNode>(ConstVec))
12613 MVT FloatTy =
N->getSimpleValueType(0).getVectorElementType();
12615 MVT IntTy = Op.getOperand(0).getSimpleValueType().getVectorElementType();
12617 unsigned NumLanes = Op.getValueType().getVectorNumElements();
12618 if (FloatBits != 32 || IntBits > 32 || (NumLanes != 4 && NumLanes != 2)) {
12629 if (
C == -1 ||
C == 0 ||
C > 32)
12634 SDValue ConvInput = Op.getOperand(0);
12635 if (IntBits < FloatBits)
12640 unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp :
12641 Intrinsic::arm_neon_vcvtfxu2fp;
12650 unsigned IntNo = cast<ConstantSDNode>(
N->getOperand(0))->getZExtValue();
12661 case Intrinsic::arm_neon_vshifts:
12662 case Intrinsic::arm_neon_vshiftu:
12663 case Intrinsic::arm_neon_vrshifts:
12664 case Intrinsic::arm_neon_vrshiftu:
12665 case Intrinsic::arm_neon_vrshiftn:
12666 case Intrinsic::arm_neon_vqshifts:
12667 case Intrinsic::arm_neon_vqshiftu:
12668 case Intrinsic::arm_neon_vqshiftsu:
12669 case Intrinsic::arm_neon_vqshiftns:
12670 case Intrinsic::arm_neon_vqshiftnu:
12671 case Intrinsic::arm_neon_vqshiftnsu:
12672 case Intrinsic::arm_neon_vqrshiftns:
12673 case Intrinsic::arm_neon_vqrshiftnu:
12674 case Intrinsic::arm_neon_vqrshiftnsu: {
12675 EVT VT =
N->getOperand(1).getValueType();
12677 unsigned VShiftOpc = 0;
12680 case Intrinsic::arm_neon_vshifts:
12681 case Intrinsic::arm_neon_vshiftu:
12686 if (
isVShiftRImm(
N->getOperand(2), VT,
false,
true, Cnt)) {
12693 case Intrinsic::arm_neon_vrshifts:
12694 case Intrinsic::arm_neon_vrshiftu:
12699 case Intrinsic::arm_neon_vqshifts:
12700 case Intrinsic::arm_neon_vqshiftu:
12705 case Intrinsic::arm_neon_vqshiftsu:
12710 case Intrinsic::arm_neon_vrshiftn:
12711 case Intrinsic::arm_neon_vqshiftns:
12712 case Intrinsic::arm_neon_vqshiftnu:
12713 case Intrinsic::arm_neon_vqshiftnsu:
12714 case Intrinsic::arm_neon_vqrshiftns:
12715 case Intrinsic::arm_neon_vqrshiftnu:
12716 case Intrinsic::arm_neon_vqrshiftnsu:
12728 case Intrinsic::arm_neon_vshifts:
12729 case Intrinsic::arm_neon_vshiftu:
12732 case Intrinsic::arm_neon_vrshifts:
12735 case Intrinsic::arm_neon_vrshiftu:
12738 case Intrinsic::arm_neon_vrshiftn:
12741 case Intrinsic::arm_neon_vqshifts:
12744 case Intrinsic::arm_neon_vqshiftu:
12747 case Intrinsic::arm_neon_vqshiftsu:
12750 case Intrinsic::arm_neon_vqshiftns:
12753 case Intrinsic::arm_neon_vqshiftnu:
12756 case Intrinsic::arm_neon_vqshiftnsu:
12759 case Intrinsic::arm_neon_vqrshiftns:
12762 case Intrinsic::arm_neon_vqrshiftnu:
12765 case Intrinsic::arm_neon_vqrshiftnsu:
12771 return DAG.
getNode(VShiftOpc, dl,
N->getValueType(0),
12775 case Intrinsic::arm_neon_vshiftins: {
12776 EVT VT =
N->getOperand(1).getValueType();
12778 unsigned VShiftOpc = 0;
12782 else if (
isVShiftRImm(
N->getOperand(3), VT,
false,
true, Cnt))
12789 return DAG.
getNode(VShiftOpc, dl,
N->getValueType(0),
12790 N->getOperand(1),
N->getOperand(2),
12794 case Intrinsic::arm_neon_vqrshifts:
12795 case Intrinsic::arm_neon_vqrshiftu:
12812 EVT VT =
N->getValueType(0);
12827 N->getOperand(0)->getOpcode() ==
ISD::AND &&
12828 N->getOperand(0)->hasOneUse()) {
12836 ConstantSDNode *ShiftAmtNode = dyn_cast<ConstantSDNode>(
N->getOperand(1));
12845 if (AndMask == 255 || AndMask == 65535)
12849 if (MaskedBits > ShiftAmt) {
12864 if (ST->hasMVEIntegerOps() && VT ==
MVT::v2i64)
12869 switch (
N->getOpcode()) {
12882 if (
isVShiftRImm(
N->getOperand(1), VT,
false,
false, Cnt)) {
12883 unsigned VShiftOpc =
12886 return DAG.
getNode(VShiftOpc, dl, VT,
N->getOperand(0),
12906 EVT VT =
N->getValueType(0);
12913 isa<ConstantSDNode>(Lane)) {
12916 switch (
N->getOpcode()) {
12937 const APInt *CV = &
C->getAPIntValue();
12955 SDValue Op0 = CMOV->getOperand(0);
12956 SDValue Op1 = CMOV->getOperand(1);
12957 auto CCNode = cast<ConstantSDNode>(CMOV->getOperand(2));
12958 auto CC = CCNode->getAPIntValue().getLimitedValue();
12959 SDValue CmpZ = CMOV->getOperand(4);
12995 unsigned Heuristic = Subtarget->
isThumb() ? 3 : 2;
13002 if ((OrCI & Known.
Zero) != OrCI)
13008 EVT VT =
X.getValueType();
13009 unsigned BitInX = AndC->
logBase2();
13017 for (
unsigned BitInY = 0, NumActiveBits = OrCI.
getActiveBits();
13018 BitInY < NumActiveBits; ++BitInY) {
13019 if (OrCI[BitInY] == 0)
13022 Mask.setBit(BitInY);
13043 cast<ConstantSDNode>(CC->
getOperand(1))->isOne()) &&
13044 "Expected to compare against 1");
13052 unsigned IntOp = cast<ConstantSDNode>(Int.getOperand(1))->getZExtValue();
13053 if (IntOp != Intrinsic::test_set_loop_iterations)
13058 SDValue Elements = Int.getOperand(2);
13059 SDValue ExitBlock =
N->getOperand(2);
13064 SDValue Ops[] = { Chain, Elements, ExitBlock };
13078 EVT VT =
N->getValueType(0);
13080 SDValue LHS = Cmp.getOperand(0);
13081 SDValue RHS = Cmp.getOperand(1);
13095 auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->
getOperand(1));
13096 auto *RHSC = dyn_cast<ConstantSDNode>(RHS);
13097 if ((LHS00C && LHS00C->getZExtValue() == 0) &&
13098 (LHS01C && LHS01C->getZExtValue() == 1) &&
13099 (LHS1C && LHS1C->getZExtValue() == 1) &&
13100 (RHSC && RHSC->getZExtValue() == 0)) {
13118 EVT VT =
N->getValueType(0);
13120 SDValue LHS = Cmp.getOperand(0);
13121 SDValue RHS = Cmp.getOperand(1);
13122 SDValue FalseVal =
N->getOperand(0);
13123 SDValue TrueVal =
N->getOperand(1);
13153 if (CC ==
ARMCC::NE && FalseVal == RHS && FalseVal != LHS) {
13155 N->getOperand(3), Cmp);
13156 }
else if (CC ==
ARMCC::EQ && TrueVal == RHS) {
13160 N->getOperand(3), NewCmp);
13166 auto *LHS0C = dyn_cast<ConstantSDNode>(LHS->
getOperand(0));
13167 auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->
getOperand(1));
13168 auto *RHSC = dyn_cast<ConstantSDNode>(RHS);
13169 if ((LHS0C && LHS0C->getZExtValue() == 0) &&
13170 (LHS1C && LHS1C->getZExtValue() == 1) &&
13171 (RHSC && RHSC->getZExtValue() == 0)) {
13220 N->getOperand(3), CPSRGlue.
getValue(1));
13235 N->getOperand(3), CPSRGlue.
getValue(1));
13253 const APInt *TrueConst;
13260 unsigned ShiftAmount = TrueConst->
logBase2();
13274 if (Known.
Zero == 0xfffffffe)
13277 else if (Known.
Zero == 0xffffff00)
13280 else if (Known.
Zero == 0xffff0000)
13290 switch (
N->getOpcode()) {
13338 unsigned BitWidth =
N->getValueType(0).getSizeInBits();
13345 unsigned BitWidth =
N->getValueType(0).getSizeInBits();
13352 unsigned BitWidth =
N->getValueType(0).getSizeInBits();
13360 unsigned LowWidth =
N->getOperand(0).getValueType().getSizeInBits();
13362 unsigned HighWidth =
N->getOperand(1).getValueType().getSizeInBits();
13370 unsigned HighWidth =
N->getOperand(0).getValueType().getSizeInBits();
13372 unsigned LowWidth =
N->getOperand(1).getValueType().getSizeInBits();
13380 unsigned BitWidth =
N->getValueType(0).getSizeInBits();
13389 switch (cast<ConstantSDNode>(
N->getOperand(1))->getZExtValue()) {
13390 case Intrinsic::arm_neon_vld1:
13391 case Intrinsic::arm_neon_vld1x2:
13392 case Intrinsic::arm_neon_vld1x3:
13393 case Intrinsic::arm_neon_vld1x4:
13394 case Intrinsic::arm_neon_vld2:
13395 case Intrinsic::arm_neon_vld3:
13396 case Intrinsic::arm_neon_vld4:
13397 case Intrinsic::arm_neon_vld2lane:
13398 case Intrinsic::arm_neon_vld3lane:
13399 case Intrinsic::arm_neon_vld4lane:
13400 case Intrinsic::arm_neon_vld2dup:
13401 case Intrinsic::arm_neon_vld3dup:
13402 case Intrinsic::arm_neon_vld4dup:
13403 case Intrinsic::arm_neon_vst1:
13404 case Intrinsic::arm_neon_vst1x2:
13405 case Intrinsic::arm_neon_vst1x3:
13406 case Intrinsic::arm_neon_vst1x4:
13407 case Intrinsic::arm_neon_vst2:
13408 case Intrinsic::arm_neon_vst3:
13409 case Intrinsic::arm_neon_vst4:
13410 case Intrinsic::arm_neon_vst2lane:
13411 case Intrinsic::arm_neon_vst3lane:
13412 case Intrinsic::arm_neon_vst4lane:
13427 unsigned Alignment,
13429 bool *Fast)
const {
13440 if (AllowsUnaligned) {
13451 if (Subtarget->
hasNEON() && (AllowsUnaligned || Subtarget->
isLittle())) {
13507 unsigned AlignCheck) {
13508 return ((SrcAlign == 0 || SrcAlign % AlignCheck == 0) &&
13509 (DstAlign == 0 || DstAlign % AlignCheck == 0));
13513 uint64_t
Size,
unsigned DstAlign,
unsigned SrcAlign,
bool IsMemset,
13514 bool ZeroMemset,
bool MemcpyStrSrc,
13517 if ((!IsMemset || ZeroMemset) && Subtarget->
hasNEON() &&
13526 }
else if (
Size >= 8 &&
13547 return (SrcBits == 64 && DestBits == 32);
13556 return (SrcBits == 64 && DestBits == 32);
13602 return Ext->getType()->getScalarSizeInBits() ==
13603 2 * Ext->getOperand(0)->getType()->getScalarSizeInBits();
13608 !areExtDoubled(cast<Instruction>(Ext1)) ||
13609 !areExtDoubled(cast<Instruction>(Ext2)))
13620 if (!Subtarget->
hasNEON() || !
I->getType()->isVectorTy())
13623 switch (
I->getOpcode()) {
13676 unsigned AS)
const {
13679 return AM.
Scale < 0 ? 1 : 0;
13689 unsigned Scale = 1;
13706 if ((V & (Scale - 1)) != 0)
13708 return isUInt<5>(V / Scale);
13721 bool IsNeg =
false;
13734 return isShiftedUInt<7,2>(V);
13737 return isShiftedUInt<7,1>(V);
13739 return isUInt<7>(V);
13747 return isShiftedUInt<8, 1>(V);
13750 return isShiftedUInt<8, 2>(V);
13752 if (NumBytes == 1 || NumBytes == 2 || NumBytes == 4) {
13756 return isUInt<12>(V);
13782 default:
return false;
13787 return isUInt<12>(V);
13795 return isShiftedUInt<8, 2>(V);
13801 int Scale = AM.
Scale;
13806 default:
return false;
13814 Scale = Scale & ~1;
13815 return Scale == 2 || Scale == 4 || Scale == 8;
13832 if (Scale & 1)
return false;
13839 const int Scale = AM.
Scale;
13849 return (Scale == 1) || (!AM.
HasBaseReg && Scale == 2);
13865 switch (AM.
Scale) {
13882 int Scale = AM.
Scale;
13884 default:
return false;
13888 if (Scale < 0) Scale = -Scale;
13896 if (Scale == 1 || (AM.
HasBaseReg && Scale == -1))
13909 if (Scale & 1)
return false;
13929 return Imm >= 0 && Imm <= 255;
13938 int64_t AbsImm = std::abs(Imm);
13944 return AbsImm >= 0 && AbsImm <= 255;
13948 bool isSEXTLoad,
SDValue &Base,
13949 SDValue &Offset,
bool &isInc,
13958 int RHSC = (int)RHS->getZExtValue();
13959 if (RHSC < 0 && RHSC > -256) {
13972 int RHSC = (int)RHS->getZExtValue();
13973 if (RHSC < 0 && RHSC > -0x1000) {
14007 bool isSEXTLoad,
SDValue &Base,
14008 SDValue &Offset,
bool &isInc,
14015 int RHSC = (int)RHS->getZExtValue();
14016 if (RHSC < 0 && RHSC > -0x100) {
14021 }
else if (RHSC > 0 && RHSC < 0x100) {
14044 bool isSEXTLoad =
false;
14046 Ptr = LD->getBasePtr();
14047 VT = LD->getMemoryVT();
14049 }
else if (
StoreSDNode *ST = dyn_cast<StoreSDNode>(
N)) {
14050 Ptr = ST->getBasePtr();
14051 VT = ST->getMemoryVT();
14056 bool isLegal =
false;
14059 Offset, isInc, DAG);
14062 Offset, isInc, DAG);
14080 bool isSEXTLoad =
false, isNonExt;
14082 VT = LD->getMemoryVT();
14083 Ptr = LD->getBasePtr();
14086 }
else if (
StoreSDNode *ST = dyn_cast<StoreSDNode>(
N)) {
14087 VT = ST->getMemoryVT();
14088 Ptr = ST->getBasePtr();
14089 isNonExt = !ST->isTruncatingStore();
14096 assert(Op->getValueType(0) ==
MVT::i32 &&
"Non-i32 post-inc op?!");
14097 if (Op->getOpcode() !=
ISD::ADD || !isNonExt)
14099 auto *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1));
14100 if (!RHS || RHS->getZExtValue() != 4)
14103 Offset = Op->getOperand(1);
14104 Base = Op->getOperand(0);
14110 bool isLegal =
false;
14123 if (Ptr == Offset && Op->getOpcode() ==
ISD::ADD &&
14138 const APInt &DemandedElts,
14140 unsigned Depth)
const {
14143 switch (Op.getOpcode()) {
14150 if (Op.getResNo() == 0) {
14151 SDValue LHS = Op.getOperand(0);
14152 SDValue RHS = Op.getOperand(1);
14169 Known.
One &= KnownRHS.
One;
14177 case Intrinsic::arm_ldaex:
14178 case Intrinsic::arm_ldrex: {
14179 EVT VT = cast<MemIntrinsicSDNode>(Op)->getMemoryVT();
14195 Known.
Zero &= Mask;
14201 const SDValue &SrcSV = Op.getOperand(0);
14205 ConstantSDNode *Pos = cast<ConstantSDNode>(Op.getOperand(1).getNode());
14207 "VGETLANE index out of bounds");
14212 EVT VT = Op.getValueType();
14219 Known = Known.
sext(DstSz);
14221 Known = Known.
zext(DstSz,
true );
14231 const APInt &DemandedAPInt,
14242 EVT VT = Op.getValueType();
14255 unsigned Mask =
C->getZExtValue();
14258 unsigned ShrunkMask = Mask & Demanded;
14259 unsigned ExpandedMask = Mask | ~Demanded;
14263 if (ShrunkMask == 0)
14269 if (ExpandedMask == ~0U)
14270 return TLO.
CombineTo(Op, Op.getOperand(0));
14272 auto IsLegalMask = [ShrunkMask, ExpandedMask](
unsigned Mask) ->
bool {
14273 return (ShrunkMask & Mask) == ShrunkMask && (~ExpandedMask & Mask) == 0;
14275 auto UseMask = [Mask, Op, VT, &TLO](
unsigned NewMask) ->
bool {
14276 if (NewMask == Mask)
14285 if (IsLegalMask(0xFF))
14286 return UseMask(0xFF);
14289 if (IsLegalMask(0xFFFF))
14290 return UseMask(0xFFFF);
14294 if (ShrunkMask < 256)
14295 return UseMask(ShrunkMask);
14299 if ((
int)ExpandedMask <= -2 && (
int)ExpandedMask >= -256)
14300 return UseMask(ExpandedMask);
14327 switch (AsmPieces.
size()) {
14328 default:
return false;
14330 AsmStr = AsmPieces[0];
14335 if (AsmPieces.
size() == 3 &&
14336 AsmPieces[0] ==
"rev" && AsmPieces[1] ==
"$0" && AsmPieces[2] ==
"$1" &&
14372 unsigned S = Constraint.
size();
14374 switch (Constraint[0]) {
14386 }
else if (S == 2) {
14387 switch (Constraint[0]) {
14404 Value *CallOperandVal =
info.CallOperandVal;
14407 if (!CallOperandVal)
14411 switch (*constraint) {
14431using RCPair = std::pair<unsigned, const TargetRegisterClass *>;
14435 switch (Constraint.
size()) {
14438 switch (Constraint[0]) {
14441 return RCPair(0U, &ARM::tGPRRegClass);
14442 return RCPair(0U, &ARM::GPRRegClass);
14445 return RCPair(0U, &ARM::hGPRRegClass);
14449 return RCPair(0U, &ARM::tGPRRegClass);
14450 return RCPair(0U, &ARM::GPRRegClass);
14455 return RCPair(0U, &ARM::SPRRegClass);
14457 return RCPair(0U, &ARM::DPRRegClass);
14459 return RCPair(0U, &ARM::QPRRegClass);
14465 return RCPair(0U, &ARM::SPR_8RegClass);
14467 return RCPair(0U, &ARM::DPR_8RegClass);
14469 return RCPair(0U, &ARM::QPR_8RegClass);
14475 return RCPair(0U, &ARM::SPRRegClass);
14477 return RCPair(0U, &ARM::DPR_VFP2RegClass);
14479 return RCPair(0U, &ARM::QPR_VFP2RegClass);
14485 if (Constraint[0] ==
'T') {
14486 switch (Constraint[1]) {
14490 return RCPair(0U, &ARM::tGPREvenRegClass);
14492 return RCPair(0U, &ARM::tGPROddRegClass);
14501 if (
StringRef(
"{cc}").equals_lower(Constraint))
14502 return std::make_pair(
unsigned(ARM::CPSR), &ARM::CCRRegClass);
14510 std::string &Constraint,
14511 std::vector<SDValue>&Ops,
14516 if (Constraint.length() != 1)
return;
14518 char ConstraintLetter = Constraint[0];
14519 switch (ConstraintLetter) {
14522 case 'I':
case 'J':
case 'K':
case 'L':
14523 case 'M':
case 'N':
case 'O':
14528 int64_t CVal64 =
C->getSExtValue();
14529 int CVal = (int) CVal64;
14532 if (CVal != CVal64)
14535 switch (ConstraintLetter) {
14540 if (CVal >= 0 && CVal <= 65535)
14547 if (CVal >= 0 && CVal <= 255)
14549 }
else if (Subtarget->
isThumb2()) {
14568 if (CVal >= -255 && CVal <= -1)
14574 if (CVal >= -4095 && CVal <= 4095)
14587 }
else if (Subtarget->
isThumb2()) {
14610 if (CVal >= -7 && CVal < 7)
14612 }
else if (Subtarget->
isThumb2()) {
14635 if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0))
14641 if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0))
14649 if (CVal >= 0 && CVal <= 31)
14658 if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0))
14667 if (Result.getNode()) {
14668 Ops.push_back(Result);
14678 "Unhandled Opcode in getDivRemLibcall");
14684 case MVT::i8: LC = isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8;
break;
14685 case MVT::i16: LC = isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16;
break;
14686 case MVT::i32: LC = isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32;
break;
14687 case MVT::i64: LC = isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64;
break;
14696 "Unhandled Opcode in getDivRemArgList");
14701 for (
unsigned i = 0, e =
N->getNumOperands(); i != e; ++i) {
14702 EVT ArgVT =
N->getOperand(i).getValueType();
14704 Entry.Node =
N->getOperand(i);
14706 Entry.IsSExt = isSigned;
14707 Entry.IsZExt = !isSigned;
14708 Args.push_back(
Entry);
14719 "Register-based DivRem lowering only");
14720 unsigned Opcode =
Op->getOpcode();
14722 "Invalid opcode for Div/Rem lowering");
14724 EVT VT =
Op->getValueType(0);
14735 if (hasDivide &&
Op->getValueType(0).isSimple() &&
14738 const SDValue Dividend =
Op->getOperand(0);
14739 const SDValue Divisor =
Op->getOperand(1);
14740 SDValue Div = DAG.
getNode(DivOpcode, dl, VT, Dividend, Divisor);
14744 SDValue Values[2] = {Div, Rem};
14767 .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned);
14777 std::vector<Type*> RetTyParams;
14778 Type *RetTyElement;
14780 switch (
N->getValueType(0).getSimpleVT().SimpleTy) {
14788 RetTyParams.push_back(RetTyElement);
14789 RetTyParams.push_back(RetTyElement);
14798 bool isSigned =
N->getOpcode() ==
ISD::SREM;
14806 CallLoweringInfo CLI(DAG);
14809 .setSExtResult(isSigned).setZExtResult(!isSigned).setDebugLoc(
SDLoc(
N));
14810 std::pair<SDValue, SDValue> CallResult =
LowerCallTo(CLI);
14813 SDNode *ResNode = CallResult.first.getNode();
14828 "no-stack-arg-probe")) {
14829 unsigned Align = cast<ConstantSDNode>(
Op.getOperand(2))->getZExtValue();
14837 SDValue Ops[2] = { SP, Chain };
14845 Chain = DAG.
getCopyToReg(Chain, DL, ARM::R4, Words, Flag);
14854 SDValue Ops[2] = { NewSP, Chain };
14860 const unsigned DstSz =
Op.getValueType().getSizeInBits();
14862 assert(DstSz > SrcSz && DstSz <= 64 && SrcSz >= 16 &&
14863 "Unexpected type for custom-lowering FP_EXTEND");
14866 "With both FP DP and 16, any FP conversion is legal!");
14869 "With FP16, 16 to 32 conversion is legal!");
14885 assert(LC != RTLIB::UNKNOWN_LIBCALL &&
14886 "Unexpected type for custom-lowering FP_EXTEND");
14899 assert(LC != RTLIB::UNKNOWN_LIBCALL &&
14900 "Unexpected type for custom-lowering FP_EXTEND");
14907 EVT DstVT =
Op.getValueType();
14908 const unsigned DstSz =
Op.getValueType().getSizeInBits();
14911 assert(DstSz < SrcSz && SrcSz <= 64 && DstSz >= 16 &&
14912 "Unexpected type for custom-lowering FP_ROUND");
14915 "With both FP DP and 16, any FP conversion is legal!");
14920 if (SrcSz == 32 && Subtarget->
hasFP16())
14925 assert(LC != RTLIB::UNKNOWN_LIBCALL &&
14926 "Unexpected type for custom-lowering FP_ROUND");
14927 return makeLibCall(DAG, LC, DstVT, SrcVal,
false, Loc).first;
14932 assert(
N->getValueType(0) ==
MVT::i64 &&
"Unexpected type (!= i64) on ABS.");
14969 if (v == 0xffffffff)
14981 bool ForCodeSize)
const {
14999 unsigned Intrinsic)
const {
15000 switch (Intrinsic) {
15001 case Intrinsic::arm_neon_vld1:
15002 case Intrinsic::arm_neon_vld2:
15003 case Intrinsic::arm_neon_vld3:
15004 case Intrinsic::arm_neon_vld4:
15005 case Intrinsic::arm_neon_vld2lane:
15006 case Intrinsic::arm_neon_vld3lane:
15007 case Intrinsic::arm_neon_vld4lane:
15008 case Intrinsic::arm_neon_vld2dup:
15009 case Intrinsic::arm_neon_vld3dup:
15010 case Intrinsic::arm_neon_vld4dup: {
15013 auto &DL =
I.getCalledFunction()->getParent()->getDataLayout();
15014 uint64_t NumElts = DL.getTypeSizeInBits(
I.getType()) / 64;
15016 Info.ptrVal =
I.getArgOperand(0);
15018 Value *AlignArg =
I.getArgOperand(
I.getNumArgOperands() - 1);
15019 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
15024 case Intrinsic::arm_neon_vld1x2:
15025 case Intrinsic::arm_neon_vld1x3:
15026 case Intrinsic::arm_neon_vld1x4: {
15029 auto &DL =
I.getCalledFunction()->getParent()->getDataLayout();
15030 uint64_t NumElts = DL.getTypeSizeInBits(
I.getType()) / 64;
15032 Info.ptrVal =
I.getArgOperand(
I.getNumArgOperands() - 1);
15039 case Intrinsic::arm_neon_vst1:
15040 case Intrinsic::arm_neon_vst2:
15041 case Intrinsic::arm_neon_vst3:
15042 case Intrinsic::arm_neon_vst4:
15043 case Intrinsic::arm_neon_vst2lane:
15044 case Intrinsic::arm_neon_vst3lane:
15045 case Intrinsic::arm_neon_vst4lane: {
15048 auto &DL =
I.getCalledFunction()->getParent()->getDataLayout();
15049 unsigned NumElts = 0;
15050 for (
unsigned ArgI = 1, ArgE =
I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
15051 Type *ArgTy =
I.getArgOperand(ArgI)->getType();
15054 NumElts += DL.getTypeSizeInBits(ArgTy) / 64;
15057 Info.ptrVal =
I.getArgOperand(0);
15059 Value *AlignArg =
I.getArgOperand(
I.getNumArgOperands() - 1);
15060 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
15065 case Intrinsic::arm_neon_vst1x2:
15066 case Intrinsic::arm_neon_vst1x3:
15067 case Intrinsic::arm_neon_vst1x4: {
15070 auto &DL =
I.getCalledFunction()->getParent()->getDataLayout();
15071 unsigned NumElts = 0;
15072 for (
unsigned ArgI = 1, ArgE =
I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
15073 Type *ArgTy =
I.getArgOperand(ArgI)->getType();
15076 NumElts += DL.getTypeSizeInBits(ArgTy) / 64;
15079 Info.ptrVal =
I.getArgOperand(0);
15086 case Intrinsic::arm_ldaex:
15087 case Intrinsic::arm_ldrex: {
15088 auto &DL =
I.getCalledFunction()->getParent()->getDataLayout();
15089 PointerType *PtrTy = cast<PointerType>(
I.getArgOperand(0)->getType());
15092 Info.ptrVal =
I.getArgOperand(0);
15098 case Intrinsic::arm_stlex:
15099 case Intrinsic::arm_strex: {
15100 auto &DL =
I.getCalledFunction()->getParent()->getDataLayout();
15101 PointerType *PtrTy = cast<PointerType>(
I.getArgOperand(1)->getType());
15104 Info.ptrVal =
I.getArgOperand(1);
15110 case Intrinsic::arm_stlexd:
15111 case Intrinsic::arm_strexd:
15114 Info.ptrVal =
I.getArgOperand(2);
15120 case Intrinsic::arm_ldaexd:
15121 case Intrinsic::arm_ldrexd:
15124 Info.ptrVal =
I.getArgOperand(0);
15144 if (Bits == 0 || Bits > 32)
15150 unsigned Index)
const {
15235 unsigned Size =
SI->getValueOperand()->getType()->getPrimitiveSizeInBits();
15262 return (
Size <= (Subtarget->
isMClass() ? 32U : 64U) && hasAtomicRMW)
15274 bool HasAtomicCmpXchg =
15283 return InsertFencesForAtomic;
15296 M.getOrInsertGlobal(
"__security_cookie",
15304 F->addAttribute(1, Attribute::AttrKind::InReg);
15310 return M.getGlobalVariable(
"__security_cookie");
15317 return M.getFunction(
"__security_check_cookie");
15322 unsigned &Cost)
const {
15336 if (!isa<ConstantInt>(
Idx))
15340 unsigned BitWidth = cast<VectorType>(VectorTy)->getBitWidth();
15343 if (BitWidth == 64 || BitWidth == 128) {
15365 Type *ValTy = cast<PointerType>(Addr->
getType())->getElementType();
15373 IsAcquire ? Intrinsic::arm_ldaexd : Intrinsic::arm_ldrexd;
15390 Intrinsic::ID Int = IsAcquire ? Intrinsic::arm_ldaex : Intrinsic::arm_ldrex;
15395 cast<PointerType>(Addr->
getType())->getElementType());
15417 IsRelease ? Intrinsic::arm_stlexd : Intrinsic::arm_strexd;
15426 return Builder.
CreateCall(Strex, {Lo, Hi, Addr});
15429 Intrinsic::ID Int = IsRelease ? Intrinsic::arm_stlex : Intrinsic::arm_strex;
15469 if (ElSize != 8 && ElSize != 16 && ElSize != 32)
15474 return VecSize == 64 || VecSize % 128 == 0;
15492 "Invalid interleave factor");
15493 assert(!Shuffles.
empty() &&
"Empty shufflevector input");
15495 "Unmatched number of shufflevectors and indices");
15521 if (NumLoads > 1) {
15538 Type *Tys[] = {VecTy, Int8Ptr};
15539 static const Intrinsic::ID LoadInts[3] = {Intrinsic::arm_neon_vld2,
15540 Intrinsic::arm_neon_vld3,
15541 Intrinsic::arm_neon_vld4};
15550 for (
unsigned LoadCount = 0; LoadCount < NumLoads; ++LoadCount) {
15566 for (
unsigned i = 0; i < Shuffles.
size(); i++) {
15568 unsigned Index = Indices[i];
15578 SubVecs[SV].push_back(SubVec);
15587 auto &SubVec = SubVecs[SVI];
15590 SVI->replaceAllUsesWith(WideVec);
15624 unsigned Factor)
const {
15626 "Invalid interleave factor");
15630 "Invalid interleaved store");
15636 const DataLayout &DL =
SI->getModule()->getDataLayout();
15665 Value *BaseAddr =
SI->getPointerOperand();
15667 if (NumStores > 1) {
15670 LaneLen /= NumStores;
15678 SI->getPointerAddressSpace()));
15686 Type *Tys[] = {Int8Ptr, SubVecTy};
15687 static const Intrinsic::ID StoreInts[3] = {Intrinsic::arm_neon_vst2,
15688 Intrinsic::arm_neon_vst3,
15689 Intrinsic::arm_neon_vst4};
15691 for (
unsigned StoreCount = 0; StoreCount < NumStores; ++StoreCount) {
15694 if (StoreCount > 0)
15696 BaseAddr, LaneLen * Factor);
15705 for (
unsigned i = 0; i < Factor; i++) {
15706 unsigned IdxI = StoreCount * LaneLen * Factor + i;
15707 if (Mask[IdxI] >= 0) {
15711 unsigned StartMask = 0;
15712 for (
unsigned j = 1; j < LaneLen; j++) {
15713 unsigned IdxJ = StoreCount * LaneLen * Factor + j;
15714 if (Mask[IdxJ * Factor + IdxI] >= 0) {
15715 StartMask = Mask[IdxJ * Factor + IdxI] - IdxJ;
15745 uint64_t &Members) {
15746 if (
auto *ST = dyn_cast<StructType>(Ty)) {
15747 for (
unsigned i = 0; i < ST->getNumElements(); ++i) {
15748 uint64_t SubMembers = 0;
15751 Members += SubMembers;
15753 }
else if (
auto *AT = dyn_cast<ArrayType>(Ty)) {
15754 uint64_t SubMembers = 0;
15757 Members += SubMembers * AT->getNumElements();
15768 }
else if (
auto *VT = dyn_cast<VectorType>(Ty)) {
15775 return VT->getBitWidth() == 64;
15777 return VT->getBitWidth() == 128;
15779 switch (VT->getBitWidth()) {
15792 return (Members > 0 && Members <= 4);
15812 if (getEffectiveCallingConv(CallConv, isVarArg) !=
15817 uint64_t Members = 0;
15822 return IsHA || IsIntArray;
15826 const Constant *PersonalityFn)
const {
15829 return Subtarget->
useSjLjEH() ? ARM::NoRegister : ARM::R0;
15833 const Constant *PersonalityFn)
const {
15836 return Subtarget->
useSjLjEH() ? ARM::NoRegister : ARM::R1;
15845void ARMTargetLowering::insertCopiesSplitCSR(
15859 RC = &ARM::GPRRegClass;
15860 else if (ARM::DPRRegClass.
contains(*
I))
15861 RC = &ARM::DPRRegClass;
15865 unsigned NewVR =
MRI->createVirtualRegister(RC);
15871 assert(
Entry->getParent()->getFunction().hasFnAttribute(
15872 Attribute::NoUnwind) &&
15873 "Function should be nounwind in insertCopiesSplitCSR!");
15879 for (
auto *Exit : Exits)
15881 TII->get(TargetOpcode::COPY), *
I)
unsigned const MachineRegisterInfo * MRI
static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, int64_t &Cnt)
isVShiftRImm - Check if this is a valid build_vector for the immediate operand of a vector shift righ...
static bool areExtractExts(Value *Ext1, Value *Ext2)
Check if Ext1 and Ext2 are extends of the same type, doubling the bitwidth of the vector elements.
static bool memOpAlign(unsigned DstAlign, unsigned SrcAlign, unsigned AlignCheck)
static EVT getExtensionTo64Bits(const EVT &OrigVT)
static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG, bool isSigned)
static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG)
static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG)
static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt)
getVShiftImm - Check if this is a valid build_vector for the immediate operand of a vector shift oper...
static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG)
static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG)
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, SDValue RHS, SelectionDAG &DAG, const SDLoc &dl)
GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit the specified operations t...
static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG)
static SDValue createGPRPairNode(SelectionDAG &DAG, SDValue V)
static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt)
isVShiftLImm - Check if this is a valid build_vector for the immediate operand of a vector shift left...
static bool isSignExtended(SDNode *N, SelectionDAG &DAG)
static bool isZeroExtended(SDNode *N, SelectionDAG &DAG)
static const unsigned PerfectShuffleTable[6561+1]
amdgpu aa AMDGPU Address space based Alias Analysis Wrapper
static bool isConstant(const MachineInstr &MI)
amdgpu Simplify well known AMD library false FunctionCallee Callee
amdgpu Simplify well known AMD library false FunctionCallee Value const Twine & Name
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG)
static bool isStore(int Opcode)
static bool isThumb(const MCSubtargetInfo &STI)
static bool isVREVMask(ArrayRef< int > M, EVT VT, unsigned BlockSize)
isVREVMask - Check if a vector shuffle corresponds to a VREV instruction with the specified blocksize...
static bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, MachineFrameInfo &MFI, const MachineRegisterInfo *MRI, const TargetInstrInfo *TII)
MatchingStackOffset - Return true if the given stack call argument is already available in the same p...
static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode, ARMCC::CondCodes &CondCode2, bool &InvalidOnQNaN)
FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
static SDValue LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(SDValue Op, SelectionDAG &DAG)
static SDValue LowerShift(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue AddRequiredExtensionForVMULL(SDValue N, SelectionDAG &DAG, const EVT &OrigTy, const EVT &ExtTy, unsigned ExtOpcode)
AddRequiredExtensionForVMULL - Add a sign/zero extension to extend the total value size to 64 bits.
static cl::opt< unsigned > ConstpoolPromotionMaxSize("arm-promote-constant-max-size", cl::Hidden, cl::desc("Maximum size of constant to promote into a constant pool"), cl::init(64))
static bool isZeroOrAllOnes(SDValue N, bool AllOnes)
static bool isVTBLMask(ArrayRef< int > M, EVT VT)
static cl::opt< bool > EnableConstpoolPromotion("arm-promote-constant", cl::Hidden, cl::desc("Enable / disable promotion of unnamed_addr constants into " "constant pools"), cl::init(false))
static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef, unsigned SplatBitSize, SelectionDAG &DAG, const SDLoc &dl, EVT &VT, bool is128Bits, NEONModImmType type)
isNEONModifiedImm - Check if the specified splat value corresponds to a valid vector constant for a N...
static const APInt * isPowerOf2Constant(SDValue V)
static SDValue PerformVCVTCombine(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD) can replace combinations of ...
static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG)
static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC)
IntCCToARMCC - Convert a DAG integer condition code to an ARM CC.
static SDValue ConvertBooleanCarryToCarryFlag(SDValue BoolCarry, SelectionDAG &DAG)
static bool isGTorGE(ISD::CondCode CC)
static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a vldN-lane (N > 1) intrinsic,...
static SDValue ParseBFI(SDNode *N, APInt &ToMask, APInt &FromMask)
static bool isReverseMask(ArrayRef< int > M, EVT VT)
static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG)
static bool isVZIP_v_undef_Mask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of "vector_shuffle v,...
static SDValue AddCombineTo64bitUMAAL(SDNode *AddeNode, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG)
static bool isVTRNMask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
static SDValue AddCombineToVPADD(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static SDValue PerformShiftCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *ST)
PerformShiftCombine - Checks for immediate versions of vector shifts and lowers them.
static void ExpandREAD_REGISTER(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG)
static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG)
static SDValue PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
Target-specific dag combine xforms for ARMISD::BUILD_VECTOR.
static bool isSRL16(const SDValue &Op)
static SDValue combineSelectAndUseCommutative(SDNode *N, bool AllOnes, TargetLowering::DAGCombinerInfo &DCI)
static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
static bool isLTorLE(ISD::CondCode CC)
static SDValue LowerSDIV_v4i16(SDValue N0, SDValue N1, const SDLoc &dl, SelectionDAG &DAG)
static SDValue AddCombineTo64bitMLAL(SDNode *AddeSubeNode, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static SDValue LowerWRITE_REGISTER(SDValue Op, SelectionDAG &DAG)
static bool checkAndUpdateCPSRKill(MachineBasicBlock::iterator SelectItr, MachineBasicBlock *BB, const TargetRegisterInfo *TRI)
static SDValue PerformBFICombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static bool hasNormalLoadOperand(SDNode *N)
hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node are normal,...
static SDValue PerformInsertEltCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
PerformInsertEltCombine - Target-specific dag combine xforms for ISD::INSERT_VECTOR_ELT.
static cl::opt< unsigned > ConstpoolPromotionMaxTotal("arm-promote-constant-max-total", cl::Hidden, cl::desc("Maximum size of ALL constants to promote into a constant pool"), cl::init(128))
static RTLIB::Libcall getDivRemLibcall(const SDNode *N, MVT::SimpleValueType SVT)
static SDValue PerformABSCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static SDValue SkipLoadExtensionForVMULL(LoadSDNode *LD, SelectionDAG &DAG)
SkipLoadExtensionForVMULL - return a load of the original vector size that does not do any sign/zero ...
static SDValue AddCombineVUZPToVPADDL(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static const MCPhysReg GPRArgRegs[]
static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformADDCombineWithOperands - Try DAG combinations for an ADD with operands N0 and N1.
static bool isVZIPMask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
static SDValue PerformORCombineToSMULWBT(SDNode *OR, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static bool isVTRN_v_undef_Mask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of "vector_shuffle v,...
static SDValue FindBFIToCombineWith(SDNode *N)
static SDValue ConvertCarryFlagToBooleanCarry(SDValue Flags, EVT VT, SelectionDAG &DAG)
static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode, bool &swpCmpOps, bool &swpVselOps)
static void ReplaceLongIntrinsic(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG)
static bool isS16(const SDValue &Op, SelectionDAG &DAG)
static bool isSRA16(const SDValue &Op)
static SDValue AddCombineBUILD_VECTORToVPADDL(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static SDValue LowerInterruptReturn(SmallVectorImpl< SDValue > &RetOps, const SDLoc &DL, SelectionDAG &DAG)
static SDValue LowerSDIV_v4i8(SDValue X, SDValue Y, const SDLoc &dl, SelectionDAG &DAG)
static void expandf64Toi32(SDValue Op, SelectionDAG &DAG, SDValue &RetVal1, SDValue &RetVal2)
static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue PerformVLDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static bool isSaturatingConditional(const SDValue &Op, SDValue &V, uint64_t &K, bool &usat)
static bool isUpperSaturate(const SDValue LHS, const SDValue RHS, const SDValue TrueVal, const SDValue FalseVal, const ISD::CondCode CC, const SDValue K)
static bool isSHL16(const SDValue &Op)
static bool isVEXTMask(ArrayRef< int > M, EVT VT, bool &ReverseVEXT, unsigned &Imm)
static SDValue PerformADDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
static unsigned getLdOpcode(unsigned LdSize, bool IsThumb1, bool IsThumb2)
Return the load opcode for a given load size.
static bool isLegalT2AddressImmediate(int64_t V, EVT VT, const ARMSubtarget *Subtarget)
static bool isLegalMVEShuffleOp(unsigned PFEntry)
static bool isVUZPMask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG)
PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for ISD::VECTOR_SHUFFLE.
static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG)
SkipExtensionForVMULL - For a node that is a SIGN_EXTEND, ZERO_EXTEND, extending load,...
static MachineBasicBlock * OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ)
static SDValue PerformAddcSubcCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static TargetLowering::ArgListTy getDivRemArgList(const SDNode *N, LLVMContext *Context, const ARMSubtarget *Subtarget)
static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl)
getZeroVector - Returns a vector of specified type with all zero elements.
static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG)
static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT, bool isSEXTLoad, SDValue &Base, SDValue &Offset, bool &isInc, SelectionDAG &DAG)
static cl::opt< bool > ARMInterworking("arm-interworking", cl::Hidden, cl::desc("Enable / disable ARM interworking (for debugging only)"), cl::init(true))
static void ReplaceREADCYCLECOUNTER(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
static SDValue PerformORCombineToBFI(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
ExpandBITCAST - If the target supports VFP, this function is called to expand a bit convert where eit...
static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes, SDValue &CC, bool &Invert, SDValue &OtherOp, SelectionDAG &DAG)
static SDValue PerformLOADCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SDValue PerformAddeSubeCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static void ReplaceCMP_SWAP_64Results(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG)
static SDValue PerformVDUPCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformVDUPCombine - Target-specific dag combine xforms for ARMISD::VDUP.
static bool isLowerSaturate(const SDValue LHS, const SDValue RHS, const SDValue TrueVal, const SDValue FalseVal, const ISD::CondCode CC, const SDValue K)
static void emitPostSt(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos, const TargetInstrInfo *TII, const DebugLoc &dl, unsigned StSize, unsigned Data, unsigned AddrIn, unsigned AddrOut, bool IsThumb1, bool IsThumb2)
Emit a post-increment store operation with given size.
static SDValue CombineBaseUpdate(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
CombineBaseUpdate - Target-specific DAG combine function for VLDDUP, NEON load/store intrinsics,...
static SDValue PerformVMOVRRDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformVMOVRRDCombine - Target-specific dag combine xforms for ARMISD::VMOVRRD.
static SDValue WinDBZCheckDenominator(SelectionDAG &DAG, SDNode *N, SDValue InChain)
static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op, ArrayRef< int > ShuffleMask, SelectionDAG &DAG)
static SDValue PerformVMULCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformVMULCombine Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the special multi...
static SDValue PerformORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformORCombine - Target-specific dag combine xforms for ISD::OR.
static SDValue LowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG)
static SDValue LowerFPOWI(SDValue Op, const ARMSubtarget &Subtarget, SelectionDAG &DAG)
static unsigned SelectPairHalf(unsigned Elements, ArrayRef< int > Mask, unsigned Index)
static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG)
static void emitPostLd(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos, const TargetInstrInfo *TII, const DebugLoc &dl, unsigned LdSize, unsigned Data, unsigned AddrIn, unsigned AddrOut, bool IsThumb1, bool IsThumb2)
Emit a post-increment load operation with given size.
static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG)
PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics.
static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG, const ARMSubtarget *ST, const SDLoc &dl)
static SDValue PerformXORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
std::pair< unsigned, const TargetRegisterClass * > RCPair
static SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp, TargetLowering::DAGCombinerInfo &DCI, bool AllOnes=false)
static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST)
PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND, ISD::ZERO_EXTEND,...
static SDValue CombineVMOVDRRCandidateWithVecOp(const SDNode *BC, SelectionDAG &DAG)
BC is a bitcast that is about to be turned into a VMOVDRR.
static SDValue promoteToConstantPool(const ARMTargetLowering *TLI, const GlobalValue *GV, SelectionDAG &DAG, EVT PtrVT, const SDLoc &dl)
static unsigned isNEONTwoResultShuffleMask(ArrayRef< int > ShuffleMask, EVT VT, unsigned &WhichResult, bool &isV_UNDEF)
Check if ShuffleMask is a NEON two-result shuffle (VZIP, VUZP, VTRN), and return the corresponding AR...
static bool BitsProperlyConcatenate(const APInt &A, const APInt &B)
static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT, bool isSEXTLoad, SDValue &Base, SDValue &Offset, bool &isInc, SelectionDAG &DAG)
static SDValue PerformSUBCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB.
static bool allUsersAreInFunction(const Value *V, const Function *F)
Return true if all users of V are within function F, looking through ConstantExprs.
static SDValue PerformSTORECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
PerformSTORECombine - Target-specific dag combine xforms for ISD::STORE.
static bool isSingletonVEXTMask(ArrayRef< int > M, EVT VT, unsigned &Imm)
static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG)
PerformVMOVDRRCombine - Target-specific dag combine xforms for ARMISD::VMOVDRR.
static bool isLowerSaturatingConditional(const SDValue &Op, SDValue &V, SDValue &SatK)
static bool isLegalAddressImmediate(int64_t V, EVT VT, const ARMSubtarget *Subtarget)
isLegalAddressImmediate - Return true if the integer value can be used as the offset of the target ad...
static bool isLegalT1AddressImmediate(int64_t V, EVT VT)
static SDValue CombineANDShift(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG)
static SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG)
static SDValue PerformSHLSimplify(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *ST)
static SDValue PerformADDECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformADDECombine - Target-specific dag combine transform from ARMISD::ADDC, ARMISD::ADDE,...
static SDValue PerformUMLALCombine(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
static SDValue PerformHWLoopCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *ST)
static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG)
static bool isVUZP_v_undef_Mask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of "vector_shuffle v,...
static bool isHomogeneousAggregate(Type *Ty, HABaseType &Base, uint64_t &Members)
static SDValue PerformMULCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static SDValue PerformVDIVCombine(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
PerformVDIVCombine - VCVT (fixed-point to floating-point, Advanced SIMD) can replace combinations of ...
static SDValue PerformANDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static bool canChangeToInt(SDValue Op, bool &SeenZero, const ARMSubtarget *Subtarget)
canChangeToInt - Given the fp compare operand, return true if it is suitable to morph to an integer c...
static unsigned getStOpcode(unsigned StSize, bool IsThumb1, bool IsThumb2)
Return the store opcode for a given store size.
static bool IsVUZPShuffleNode(SDNode *N)
static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue AddCombineTo64BitSMLAL16(SDNode *AddcNode, SDNode *AddeNode, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static void attachMEMCPYScratchRegs(const ARMSubtarget *Subtarget, MachineInstr &MI, const SDNode *Node)
Attaches vregs to MEMCPY that it will use as scratch registers when it is expanded into LDM/STM.
static bool isFloatingPointZero(SDValue Op)
isFloatingPointZero - Return true if this is +0.0.
static SDValue findMUL_LOHI(SDValue V)
static SDValue PerformBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformBUILD_VECTORCombine - Target-specific dag combine xforms for ISD::BUILD_VECTOR.
static SDValue PerformVDUPLANECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
PerformVDUPLANECombine - Target-specific dag combine xforms for ARMISD::VDUPLANE.
Function Alias Analysis Results
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< ShadowStackGC > C("shadow-stack", "Very portable GC for uncooperative code generators")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Analysis containing CSE Info
#define LLVM_FALLTHROUGH
LLVM_FALLTHROUGH - Mark fallthrough cases in switch statements.
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static Optional< bool > isBigEndian(const SmallVector< int64_t, 4 > &ByteOffsets, int64_t FirstOffset)
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static Function * getFunction(Constant *C)
const HexagonInstrInfo * TII
std::pair< Value *, Value * > ShuffleOps
We are building a shuffle to create V, which is a sequence of insertelement, extractelement pairs.
static Value * LowerCTPOP(LLVMContext &Context, Value *V, Instruction *IP)
Emit the code to lower ctpop of V before the specified instruction IP.
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
unsigned const TargetRegisterInfo * TRI
static bool isVolatile(Instruction *Inst)
Module.h This file contains the declarations for the Module class.
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
static ManagedStatic< OptionRegistry > OR
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
separate const offset from Split GEPs to a variadic base and a constant offset for better CSE
#define STATISTIC(VARNAME, DESC)
static const int BlockSize
This file describes how to lower LLVM code to machine code.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
APInt bitcastToAPInt() const
Class for arbitrary precision integers.
uint64_t getZExtValue() const
Get zero extended value.
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
unsigned getActiveBits() const
Compute the number of active bits in the value.
APInt trunc(unsigned width) const
Truncate to new width.
bool sgt(const APInt &RHS) const
Signed greather than comparison.
unsigned getBitWidth() const
Return the number of bits in the APInt.
static APInt getAllOnesValue(unsigned numBits)
Get the all-ones value.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
unsigned countPopulation() const
Count the number of bits set.
unsigned countTrailingZeros() const
Count the number of trailing zero bits.
unsigned logBase2() const
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
bool isAllOnesValue() const
Determine if all bits are set.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Get a value with low bits set.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Get a value with high bits set.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
int64_t getSExtValue() const
Get sign extended value.
void lshrInPlace(unsigned ShiftAmt)
Logical right-shift this APInt by ShiftAmt in place.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
virtual const ARMBaseRegisterInfo & getRegisterInfo() const =0
const uint32_t * getSjLjDispatchPreservedMask(const MachineFunction &MF) const
const MCPhysReg * getCalleeSavedRegs(const MachineFunction *MF) const override
Code Generation virtual methods...
Register getFrameRegister(const MachineFunction &MF) const override
const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const override
const uint32_t * getTLSCallPreservedMask(const MachineFunction &MF) const
const uint32_t * getThisReturnPreservedMask(const MachineFunction &MF, CallingConv::ID) const
getThisReturnPreservedMask - Returns a call preserved mask specific to the case that 'returned' is on...
static ARMConstantPoolConstant * Create(const Constant *C, unsigned ID)
static ARMConstantPoolMBB * Create(LLVMContext &C, const MachineBasicBlock *mbb, unsigned ID, unsigned char PCAdj)
static ARMConstantPoolSymbol * Create(LLVMContext &C, StringRef s, unsigned ID, unsigned char PCAdj)
ARMConstantPoolValue - ARM specific constantpool value.
ARMFunctionInfo - This class is derived from MachineFunctionInfo and contains private ARM-specific in...
int getVarArgsFrameIndex() const
int getPromotedConstpoolIncrease() const
SmallPtrSet< const GlobalVariable *, 2 > & getGlobalsPromotedToConstantPool()
unsigned createPICLabelUId()
void setPromotedConstpoolIncrease(int Sz)
bool isThumb1OnlyFunction() const
void setArgRegsSaveSize(unsigned s)
void setReturnRegsCount(unsigned s)
void setVarArgsFrameIndex(int Index)
unsigned getArgRegsSaveSize() const
void markGlobalAsPromotedToConstantPool(const GlobalVariable *GV)
Indicate to the backend that GV has had its storage changed to inside a constant pool.
void setIsSplitCSR(bool s)
void setArgumentStackSize(unsigned size)
bool isTargetMachO() const
bool hasVMLxForwarding() const
bool hasRetAddrStack() const
bool isTargetAEABI() const
bool supportsTailCall() const
const Triple & getTargetTriple() const
const ARMBaseInstrInfo * getInstrInfo() const override
bool isThumb1Only() const
bool hasFPARMv8Base() const
bool isTargetWindows() const
bool isGVIndirectSymbol(const GlobalValue *GV) const
True if the GV will be accessed via an indirect symbol.
bool hasDivideInThumbMode() const
bool isTargetDarwin() const
const ARMBaseRegisterInfo * getRegisterInfo() const override
bool isTargetAndroid() const
bool isTargetCOFF() const
bool isTargetGNUAEABI() const
bool isTargetWatchOS() const
bool preferISHSTBarriers() const
bool genLongCalls() const
bool isFPBrccSlow() const
unsigned getPrefLoopAlignment() const
bool useNEONForSinglePrecisionFP() const
const InstrItineraryData * getInstrItineraryData() const override
getInstrItins - Return the instruction itineraries based on subtarget selection.
bool isTargetWatchABI() const
bool hasDataBarrier() const
bool hasAnyDataBarrier() const
bool allowsUnalignedMem() const
bool isTargetMuslAEABI() const
bool useSoftFloat() const
bool hasMPExtension() const
bool hasMVEFloatOps() const
bool hasDivideInARMMode() const
bool isTargetHardFloat() const
bool hasV8MBaselineOps() const
bool hasMVEIntegerOps() const
bool hasAcquireRelease() const
bool genExecuteOnly() const
bool isReadOnly(const GlobalValue *GV) const
bool shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize, unsigned &PrefAlign) const override
Return true if the pointer arguments to CI should be aligned by aligning the object whose address is ...
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override
Returns how the given (atomic) load should be expanded by the IR-level AtomicExpand pass.
unsigned getNumInterleavedAccesses(VectorType *VecTy, const DataLayout &DL) const
Returns the number of interleaved accesses that will be generated when lowering accesses of the given...
bool shouldInsertFencesForAtomic(const Instruction *I) const override
Whether AtomicExpandPass should automatically insert fences and reduce ordering for this atomic.
void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
LowerAsmOperandForConstraint - Lower the specified operand into the Ops vector.
bool isDesirableToCommuteWithShift(const SDNode *N, CombineLevel Level) const override
Return true if it is profitable to move this shift by a constant amount though its operand,...
ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override
Examine constraint string and operand type and determine a weight value.
unsigned getMaxSupportedInterleaveFactor() const override
Get the maximum supported factor for interleaved memory accesses.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target...
const ARMSubtarget * getSubtarget() const
bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const
int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS) const override
getScalingFactorCost - Return the cost of the scaling used in addressing mode represented by AM.
bool isLegalInterleavedAccessType(VectorType *VecTy, const DataLayout &DL) const
Returns true if VecTy is a legal interleaved access type.
bool isLegalT1ScaledAddressingMode(const AddrMode &AM, EVT VT) const
Returns true if the addressing mode represented by AM is legal for the Thumb1 target,...
bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override
getPreIndexedAddressParts - returns true by value, base pointer and offset pointer and addressing mod...
unsigned getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
bool shouldSinkOperands(Instruction *I, SmallVectorImpl< Use * > &Ops) const override
Check if sinking I's operands to I's basic block is profitable, because the operands can be folded in...
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
ReplaceNodeResults - Replace the results of node with an illegal result type with new values built ou...
bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override
Return true if SHIFT instructions should be expanded to SHIFT_PARTS instructions, and false if a libr...
bool isLegalAddImmediate(int64_t Imm) const override
isLegalAddImmediate - Return true if the specified immediate is legal add immediate,...
bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override
Returns true if the given (atomic) store should be expanded by the IR-level AtomicExpand pass into an...
bool isFNegFree(EVT VT) const override
Return true if an fneg operation is free to the point where it is never worthwhile to replace it with...
void finalizeLowering(MachineFunction &MF) const override
Execute target specific actions to finalize target lowering.
unsigned getABIAlignmentForCallingConv(Type *ArgTy, DataLayout DL) const override
Return the correct alignment for the current calling convention.
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize=false) const override
isFPImmLegal - Returns true if the target can instruction select the specified FP immediate natively.
ConstraintType getConstraintType(StringRef Constraint) const override
getConstraintType - Given a constraint letter, return the type of constraint it is for this target.
bool preferIncOfAddToSubOfNot(EVT VT) const override
These two forms are equivalent: sub y, (xor x, -1) add (add x, 1), y The variant with two add's is IR...
Function * getSSPStackGuardCheck(const Module &M) const override
If the target has a standard stack protection check function that performs validation and error handl...
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
void insertSSPDeclarations(Module &M) const override
Inserts necessary declarations for SSP (stack protection) purpose.
bool ExpandInlineAsm(CallInst *CI) const override
This hook allows the target to expand an inline asm call to be explicit llvm code if it wants to.
SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const
PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV.
Value * getSDagStackGuard(const Module &M) const override
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...
bool shouldFoldConstantShiftPairToMask(const SDNode *N, CombineLevel Level) const override
Return true if it is profitable to fold a pair of shifts into a mask.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
getSetCCResultType - Return the value type to use for ISD::SETCC.
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
CCAssignFn * CCAssignFnForReturn(CallingConv::ID CC, bool isVarArg) const
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo) const override
createFastISel - This method returns a target specific FastISel object, or null if the target does no...
void AdjustInstrPostInstrSelection(MachineInstr &MI, SDNode *Node) const override
This method should be implemented by targets that mark instructions with the 'hasPostISelHook' flag.
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
Value * emitLoadLinked(IRBuilder<> &Builder, Value *Addr, AtomicOrdering Ord) const override
Perform a load-linked operation on Addr, returning a "Value *" with the corresponding pointee type.
bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, unsigned Index) const override
Return true if EXTRACT_SUBVECTOR is cheap for this result type with this index.
bool isCheapToSpeculateCttz() const override
Return true if it is cheap to speculate a call to intrinsic cttz.
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, MachineFunction &MF, unsigned Intrinsic) const override
getTgtMemIntrinsic - Represent NEON load and store intrinsics as MemIntrinsicNodes.
bool isTruncateFree(Type *SrcTy, Type *DstTy) const override
Return true if it's free to truncate a value of type FromTy to type ToTy.
bool isShuffleMaskLegal(ArrayRef< int > M, EVT VT) const override
isShuffleMaskLegal - Targets can use this to indicate that they only support some VECTOR_SHUFFLE oper...
bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const override
Returns true if it is beneficial to convert a load of a constant to just the constant itself.
Instruction * makeDMB(IRBuilder<> &Builder, ARM_MB::MemBOpt Domain) const
bool useLoadStackGuardNode() const override
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const override
getRegClassFor - Return the register class that should be used for the specified value type.
bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override
Returns true if an argument of type Ty needs to be passed in a contiguous block of registers in calli...
std::pair< const TargetRegisterClass *, uint8_t > findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const override
Return the largest legal super-reg register class of the register class for the specified type and it...
bool isZExtFree(SDValue Val, EVT VT2) const override
Return true if zero-extending the specific node Val to type VT2 is free (either because it's implicit...
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
Value * emitStoreConditional(IRBuilder<> &Builder, Value *Val, Value *Addr, AtomicOrdering Ord) const override
Perform a store-conditional operation to Addr.
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace, unsigned Align, MachineMemOperand::Flags Flags, bool *Fast) const override
allowsMisalignedMemoryAccesses - Returns true if the target allows unaligned memory accesses of the s...
bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI, unsigned Factor) const override
Lower an interleaved store into a vstN intrinsic.
bool isCheapToSpeculateCtlz() const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
SDValue PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const
PerformBRCONDCombine - Target-specific DAG combining for ARMISD::BRCOND.
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc, const AttributeList &FuncAttributes) const override
Returns the target specific optimal type for load and store operations as a result of memset,...
bool isLegalICmpImmediate(int64_t Imm) const override
isLegalICmpImmediate - Return true if the specified immediate is legal icmp immediate,...
const char * LowerXConstraint(EVT ConstraintVT) const override
Try to replace an X constraint, which matches anything, with another that has more specific requireme...
unsigned getJumpTableEncoding() const override
Return the entry encoding for a jump table in the current function.
bool targetShrinkDemandedConstant(SDValue Op, const APInt &Demanded, TargetLoweringOpt &TLO) const override
Instruction * emitLeadingFence(IRBuilder<> &Builder, Instruction *Inst, AtomicOrdering Ord) const override
Inserts in the IR a target-specific intrinsic specifying a fence.
bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const override
Return true if it is profitable for dag combiner to transform a floating point op of specified opcode...
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override
Returns how the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass.
CCAssignFn * CCAssignFnForCall(CallingConv::ID CC, bool isVarArg) const
Instruction * emitTrailingFence(IRBuilder<> &Builder, Instruction *Inst, AtomicOrdering Ord) const override
bool isVectorLoadExtDesirable(SDValue ExtVal) const override
Return true if folding a vector load into ExtVal (a sign, zero, or any extend node) is profitable.
bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx, unsigned &Cost) const override
Return true if the target can combine store(extractelement VectorTy, Idx).
bool lowerInterleavedLoad(LoadInst *LI, ArrayRef< ShuffleVectorInst * > Shuffles, ArrayRef< unsigned > Indices, unsigned Factor) const override
Lower an interleaved load into a vldN intrinsic.
bool useSoftFloat() const override
bool alignLoopsWithOptSize() const override
Should loops be aligned even when the function is marked OptSize (but not MinSize).
SDValue PerformCMOVToBFICombine(SDNode *N, SelectionDAG &DAG) const
bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override
Return true if a truncation from FromTy to ToTy is permitted when deciding whether a call is in tail ...
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const override
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
unsigned getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override
getPostIndexedAddressParts - returns true by value, base pointer and offset pointer and addressing mo...
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
bool isFloatingPointOperation() const
bool hasFnAttribute(Attribute::AttrKind Kind) const
Equivalent to hasAttribute(AttributeList::FunctionIndex, Kind) but may be faster.
LLVM Basic Block Representation.
const Function * getParent() const
Return the enclosing method, or null if none.
The address of a basic block.
static BranchProbability getZero()
A "pseudo-class" with methods for operating on BUILD_VECTORs.
bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef, unsigned &SplatBitSize, bool &HasAnyUndefs, unsigned MinSplatBits=0, bool isBigEndian=false) const
Check if this is a constant splat, and if so, find the smallest element size that splats the vector.
int32_t getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements, uint32_t BitWidth) const
If this is a constant FP splat and the splatted constant FP is an exact power of 2,...
CCState - This class holds information needed while lowering arguments and return values.
void getInRegsParamInfo(unsigned InRegsParamRecordIndex, unsigned &BeginReg, unsigned &EndReg) const
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set,...
static bool resultsCompatible(CallingConv::ID CalleeCC, CallingConv::ID CallerCC, MachineFunction &MF, LLVMContext &C, const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn CalleeFn, CCAssignFn CallerFn)
Returns true if the results of the two calling conventions are compatible.
unsigned AllocateReg(unsigned Reg)
AllocateReg - Attempt to allocate one register.
unsigned getNextStackOffset() const
getNextStackOffset - Return the next stack offset such that all stack slots satisfy their alignment r...
void rewindByValRegsInfo()
unsigned getInRegsParamsProcessed() const
void addInRegsParamInfo(unsigned RegBegin, unsigned RegEnd)
void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeFormalArguments - Analyze an array of argument values, incorporating info about the formals in...
unsigned getInRegsParamsCount() const
CCValAssign - Represent assignment of one arg/retval to a location.
unsigned getLocMemOffset() const
Register getLocReg() const
LocInfo getLocInfo() const
unsigned getValNo() const
Value * getCalledValue() const
This class represents a function call, abstracting a target machine's calling convention.
BBTy * getParent() const
Get the basic block containing the call site.
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
const APFloat & getValueAPF() const
ConstantFP - Floating Point Values [float, double].
uint64_t getZExtValue() const
const APInt & getAPIntValue() const
int64_t getSExtValue() const
This is an important base class in LLVM.
A parsed version of the target data layout string in and methods for querying it.
unsigned getPrefTypeAlignment(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
unsigned getPreferredAlignment(const GlobalVariable *GV) const
Returns the preferred alignment of the specified global.
unsigned getABITypeAlignment(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
unsigned getStackAlignment() const
uint64_t getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
StringRef getPrivateGlobalPrefix() const
uint64_t getTypeSizeInBits(Type *Ty) const
Size examples:
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
LLVM_NODISCARD bool empty() const
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
TargetLoweringBase::ArgListTy ArgListTy
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
bool CanLowerReturn
CanLowerReturn - true iff the function's return value can be lowered to registers.
Type * getParamType(unsigned i) const
Parameter type accessors.
FunctionType * getFunctionType() const
Returns the FunctionType for me.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These methods get and set the calling convention of this functio...
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
const GlobalValue * getGlobal() const
bool hasExternalWeakLinkage() const
bool hasDLLImportStorageClass() const
Module * getParent()
Get the module that this global value is contained inside of...
bool isStrongDefinitionForLinker() const
Returns true if this global's definition will be the one chosen by the linker.
@ InternalLinkage
Rename collisions when linking (static functions).
unsigned isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
TargetInstrInfo overrides.
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
BasicBlock * GetInsertBlock() const
IntegerType * getInt64Ty()
Fetch the type representing a 64-bit integer.
IntegerType * getInt16Ty()
Fetch the type representing a 16-bit integer.
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
PointerType * getInt8PtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer to an 8-bit integer value.
Type * getVoidTy()
Fetch the type representing void.
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Value * CreateZExtOrBitCast(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
Value * CreateTruncOrBitCast(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="")
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args=None, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateConstGEP1_32(Value *Ptr, unsigned Idx0, const Twine &Name="")
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
const std::string & getConstraintString() const
const std::string & getAsmString() const
int getOperandCycle(unsigned ItinClassIndx, unsigned OperandIdx) const
Return the cycle for the given class and operand.
bool isEmpty() const
Returns true if there are no itineraries.
bool hasAtomicStore() const
Return true if this atomic instruction stores to memory.
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
const BasicBlock * getParent() const
Class to represent integer types.
unsigned getBitWidth() const
Get the number of bits in this IntegerType.
NodeT & get() const
get - Dereference as a NodeT reference.
static bool LowerToByteSwap(CallInst *CI)
Try to replace a call instruction with a call to a bswap intrinsic.
This is an important class for using LLVM in a threaded context.
An instruction for reading from memory.
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
Value * getPointerOperand()
unsigned getAlignment() const
Return the alignment of the access that is being performed.
This class is used to represent ISD::LOAD nodes.
Describe properties that are true of each instruction in the target description file.
unsigned getSchedClass() const
Return the scheduling class for this instruction.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specific constraint if it is set.
const MCOperandInfo * OpInfo
bool isOptionalDef() const
Set if this operand is an optional def.
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
static MVT getFloatingPointVT(unsigned BitWidth)
bool isInteger() const
Return true if this is an integer or a vector integer type.
static mvt_range integer_valuetypes()
unsigned getScalarSizeInBits() const
static mvt_range vector_valuetypes()
static mvt_range integer_vector_valuetypes()
static MVT getVectorVT(MVT VT, unsigned NumElements)
unsigned getSizeInBits() const
MVT getVectorElementType() const
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
static mvt_range fp_valuetypes()
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and updates PHI operands in the successor bloc...
bool isEHPad() const
Returns true if the block is a landing pad.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void addLiveIn(MCPhysReg PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
bool isLiveIn(MCPhysReg Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const
Return true if the specified register is in the live in set.
succ_iterator succ_begin()
std::vector< MachineBasicBlock * >::iterator succ_iterator
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
void removeSuccessor(MachineBasicBlock *Succ, bool NormalizeSuccProbs=false)
Remove successor from the successors list of this MachineBasicBlock.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
void setIsEHPad(bool V=true)
Indicates the block is a landing pad.
The MachineConstantPool class keeps track of constants referenced by a function which must be spilled...
unsigned getConstantPoolIndex(const Constant *C, unsigned Alignment)
getConstantPoolIndex - Create a new entry in the constant pool or return an existing one.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
int CreateStackObject(uint64_t Size, unsigned Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
void setAdjustsStack(bool V)
void setFrameAddressIsTaken(bool T)
void setReturnAddressIsTaken(bool s)
void computeMaxCallFrameSize(const MachineFunction &MF)
Computes the maximum size of a callframe and the AdjustsStack property.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
bool hasVAStart() const
Returns true if the function calls the llvm.va_start intrinsic.
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
bool isFixedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a fixed stack object.
int getFunctionContextIndex() const
Return the index for the function context object.
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *bb=nullptr)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
unsigned getFunctionNumber() const
getFunctionNumber - Return a unique ID for the current function.
MachineJumpTableInfo * getOrCreateJumpTableInfo(unsigned JTEntryKind)
getOrCreateJumpTableInfo - Get the JumpTableInfo for this function; if it does not already exist,...
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
void push_back(MachineBasicBlock *MBB)
const Function & getFunction() const
Return the LLVM function that this machine code represents.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, unsigned base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
MachineConstantPool * getConstantPool()
getConstantPool - Return the constant pool object for the current function.
bool hasCallSiteLandingPad(MCSymbol *Sym)
Return true if the landing pad Eh symbol has an associated call site.
unsigned addLiveIn(unsigned PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
void insert(iterator MBBI, MachineBasicBlock *MBB)
SmallVectorImpl< unsigned > & getCallSiteLandingPad(MCSymbol *Sym)
Get the call site indexes for a landing pad EH symbol.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addRegMask(const uint32_t *Mask) const
const MachineInstrBuilder & addReg(unsigned RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addConstantPoolIndex(unsigned Idx, int Offset=0, unsigned char TargetFlags=0) const
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
const MachineInstrBuilder & addJumpTableIndex(unsigned Idx, unsigned char TargetFlags=0) const
Representation of each machine instruction.
bool readsRegister(unsigned Reg, const TargetRegisterInfo *TRI=nullptr) const
Return true if the MachineInstr reads the specified register.
const MachineOperand & getOperand(unsigned i) const
bool definesRegister(unsigned Reg, const TargetRegisterInfo *TRI=nullptr) const
Return true if the MachineInstr fully defines the specified register.
unsigned createJumpTableIndex(const std::vector< MachineBasicBlock * > &DestBBs)
createJumpTableIndex - Create a new jump table.
@ EK_Inline
EK_Inline - Jump table entries are emitted inline at their point of use.
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MONonTemporal
The memory access is non-temporal.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
Flags getFlags() const
Return the raw flags of the source value,.
MachineOperand class - Representation of each machine instruction operand.
static MachineOperand CreateReg(unsigned Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
bool isReg() const
isReg - Tests if this is a MO_Register operand.
void setReg(unsigned Reg)
Change the register this operand corresponds to.
static MachineOperand CreateImm(int64_t Val)
Register getReg() const
getReg - Returns the register number.
void setIsDef(bool Val=true)
Change a def to a use, or a use to a def.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
This SDNode is used for target intrinsics that touch memory and need an associated MachineMemOperand.
This is an abstract virtual class for memory operations.
AAMDNodes getAAInfo() const
Returns the AA info that describes the dereference.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const MachinePointerInfo & getPointerInfo() const
const SDValue & getChain() const
unsigned getAlignment() const
EVT getMemoryVT() const
Return the type of the in-memory value.
CCAssignFn * CCAssignFnForReturn() const
CCAssignFn * CCAssignFnForCall() const
A Module instance is used to store all the information related to an LLVM module.
virtual void print(raw_ostream &OS, const Module *M) const
print - Print out the internal state of the pass.
Class to represent pointers.
Type * getElementType() const
const PseudoSourceValue * getJumpTable()
Return a pseudo source value referencing a jump table.
const PseudoSourceValue * getGOT()
Return a pseudo source value referencing the global offset table (or something the like).
const PseudoSourceValue * getStack()
Return a pseudo source value referencing the area below the stack frame of a function,...
A static registration template.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
This class provides iterator support for SDUse operands that use a specific SDNode.
Represents one node in the SelectionDAG.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
bool hasOneUse() const
Return true if there is exactly one use of this node.
bool isOnlyUserOf(const SDNode *N) const
Return true if this node is the only use of N.
MVT getSimpleValueType(unsigned ResNo) const
Return the type of a specified result as a simple type.
static bool hasPredecessorHelper(const SDNode *N, SmallPtrSetImpl< const SDNode * > &Visited, SmallVectorImpl< const SDNode * > &Worklist, unsigned int MaxSteps=0, bool TopologicalPrune=false)
Returns true if N is a predecessor of any node in Worklist.
bool use_empty() const
Return true if there are no uses of this node.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
unsigned getNumOperands() const
Return the number of values used by this operation.
const SDValue & getOperand(unsigned Num) const
use_iterator use_begin() const
Provide iteration support to walk over all uses of an SDNode.
bool isPredecessorOf(const SDNode *N) const
Return true if this node is a predecessor of N.
bool hasAnyUseOfValue(unsigned Value) const
Return true if there are any use of the indicated value.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
const SDNodeFlags getFlags() const
static use_iterator use_end()
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
unsigned getScalarValueSizeInBits() const
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
unsigned getNumOperands() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
const TargetSubtargetInfo & getSubtarget() const
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
unsigned InferPtrAlignment(SDValue Ptr) const
Infer alignment of a load / store address.
SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
SDValue getTargetConstantPool(const Constant *C, EVT VT, unsigned Align=0, int Offset=0, unsigned char TargetFlags=0)
SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
const TargetLowering & getTargetLoweringInfo() const
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
const DataLayout & getDataLayout() const
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned char TargetFlags=0)
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
SDValue getRegister(unsigned Reg, EVT VT)
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond)
Helper function to make it easier to build SelectCC's if you just have an ISD::CondCode instead of an...
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned char TargetFlags=0)
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, unsigned Align=0, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, unsigned Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
SDValue getRegisterMask(const uint32_t *RegMask)
SDValue getCondCode(ISD::CondCode Cond)
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
LLVMContext * getContext() const
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned char TargetFlags=0)
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
Type * getElementType() const
uint64_t getNumElements() const
For scalable vectors, this will return the minimum number of elements in the vector.
This instruction constructs a fixed permutation of two input vectors.
VectorType * getType() const
Overload to return most specific vector type.
static void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
static bool isSplatMask(const int *Mask, EVT VT)
int getMaskElt(unsigned Idx) const
int getSplatIndex() const
ArrayRef< int > getMask() const
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
LLVM_NODISCARD bool empty() const
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
LLVM_NODISCARD T pop_back_val()
iterator insert(iterator I, T &&Elt)
typename SuperClass::iterator iterator
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
This class is used to represent ISD::STORE nodes.
const SDValue & getBasePtr() const
const SDValue & getValue() const
bool isTruncatingStore() const
Return true if the op does a truncation before store.
StringRef - Represent a constant reference to a string, i.e.
const unsigned char * bytes_end() const
LLVM_NODISCARD size_t size() const
size - Get the string size.
const unsigned char * bytes_begin() const
A switch()-like statement whose cases are string literals.
LLVM_NODISCARD R Default(T Value)
StringSwitch & Case(StringLiteral S, T Value)
TargetInstrInfo - Interface to description of machine instruction set.
Provides information about what library functions are available for the current target.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
void setTargetDAGCombine(ISD::NodeType NT)
Targets should invoke this method for each target independent node that they want to provide a custom...
virtual void finalizeLowering(MachineFunction &MF) const
Execute target specific actions to finalize target lowering.
bool PredictableSelectIsExpensive
Tells the code generator that select is more expensive than a branch if the branch is usually predict...
void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC)
Override the default CondCode to be used to test the result of the comparison libcall against zero.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const
Get the CallingConv that should be used for the specified libcall.
unsigned MaxStoresPerMemcpyOptSize
Maximum number of store operations that may be substituted for a call to memcpy, used for functions w...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
void setIndexedStoreAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed store does or does not work with the specified type and indicate ...
const TargetMachine & getTargetMachine() const
bool isOperationLegalOrCustom(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC)
Set the CallingConv that should be used for the specified libcall.
virtual Value * getSDagStackGuard(const Module &M) const
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...
void setPrefLoopAlignment(unsigned Align)
Set the target's preferred loop alignment.
virtual Function * getSSPStackGuardCheck(const Module &M) const
If the target has a standard stack protection check function that performs validation and error handl...
Sched::Preference getSchedulingPreference() const
Return target scheduling preference.
void setMinFunctionAlignment(unsigned Align)
Set the target's minimum function alignment (in log2(bytes))
unsigned MaxStoresPerMemsetOptSize
Maximum number of stores operations that may be substituted for the call to memset,...
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
unsigned MaxStoresPerMemmove
Specify maximum bytes of store instructions per memmove call.
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL, bool LegalTypes=true) const
unsigned MaxStoresPerMemmoveOptSize
Maximum number of store instructions that may be substituted for a call to memmove,...
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
@ ZeroOrOneBooleanContent
@ ZeroOrNegativeOneBooleanContent
void setStackPointerRegisterToSaveRestore(unsigned R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setMinStackArgumentAlignment(unsigned Align)
Set the minimum stack alignment of an argument (in log2(bytes)).
virtual std::pair< const TargetRegisterClass *, uint8_t > findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const
Return the largest legal super-reg register class of the register class for the specified type and it...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
std::vector< ArgListEntry > ArgListTy
unsigned MaxStoresPerMemcpy
Specify maximum bytes of store instructions per memcpy call.
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
virtual void insertSSPDeclarations(Module &M) const
Inserts necessary declarations for SSP (stack protection) purpose.
void setIndexedLoadAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed load does or does not work with the specified type and indicate w...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
bool expandABS(SDNode *N, SDValue &Result, SelectionDAG &DAG) const
Expand ABS nodes.
virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS, SDValue &NewRHS, ISD::CondCode &CCCode, const SDLoc &DL) const
Soften the operands of a comparison.
bool parametersInCSRMatch(const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask, const SmallVectorImpl< CCValAssign > &ArgLocs, const SmallVectorImpl< SDValue > &OutVals) const
Check whether parameters to a call that are passed in callee saved registers are the same as from the...
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
bool isPositionIndependent() const
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Op.
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node, SDValue &Chain) const
Check whether a given call node is in tail position within its function.
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, bool isSigned, const SDLoc &dl, bool doesNotReturn=false, bool isReturnValueUsed=true, bool isPostTypeLegalization=false) const
Returns a pair of (return value, chain).
Primary interface to the complete machine description for the target machine.
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
const Triple & getTargetTriple() const
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
virtual const TargetSubtargetInfo * getSubtargetImpl(const Function &) const
Virtual method implemented by subclasses that returns a reference to that target's TargetSubtargetInf...
unsigned EnableFastISel
EnableFastISel - This flag enables fast-path instruction selection which trades away generated code q...
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
static bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
Triple - Helper class for working with autoconf configuration names.
ObjectFormatType getObjectFormat() const
getFormat - Get the object format for this triple.
bool isOSMSVCRT() const
Is this a "Windows" OS targeting a "MSVCRT.dll" environment.
bool isOSVersionLT(unsigned Major, unsigned Minor=0, unsigned Micro=0) const
isOSVersionLT - Helper function for doing comparisons against version numbers included in the target ...
bool isWindowsMSVCEnvironment() const
Checks if the environment could be MSVC.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
PointerType * getPointerTo(unsigned AddrSpace=0) const
Return a pointer to the current type.
bool isArrayTy() const
True if this is an instance of ArrayType.
bool isPointerTy() const
True if this is an instance of PointerType.
Type * getArrayElementType() const
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
Type * getVectorElementType() const
unsigned getVectorNumElements() const
bool isHalfTy() const
Return true if this is 'half', a 16-bit IEEE fp type.
unsigned getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
static PointerType * getInt8PtrTy(LLVMContext &C, unsigned AS=0)
bool isFloatingPointTy() const
Return true if this is one of the six floating-point types.
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
iterator_range< user_iterator > users()
Class to represent vector types.
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
static CondCodes getOppositeCondition(CondCodes CC)
@ SECREL
Section Relative (Windows TLS)
@ SBREL
Static Base Relative (RWPI)
@ GOTTPOFF
Global Offset Table, Thread Pointer Offset.
@ TPOFF
Thread Pointer Offset.
TOF
Target Operand Flag enum.
@ MO_NONLAZY
MO_NONLAZY - This is an independent flag, on a symbol operand "FOO" it represents a symbol which,...
@ MO_SBREL
MO_SBREL - On a symbol operand, this represents a static base relative relocation.
@ MO_DLLIMPORT
MO_DLLIMPORT - On a symbol operand, this represents that the reference to the symbol is for an import...
@ MO_GOT
MO_GOT - On a symbol operand, this represents a GOT relative relocation.
@ MO_COFFSTUB
MO_COFFSTUB - On a symbol operand "FOO", this indicates that the reference is actually to the "....
static ShiftOpc getShiftOpcForNode(unsigned Opcode)
int getSOImmVal(unsigned Arg)
getSOImmVal - Given a 32-bit immediate, if it is something that can fit into an shifter_operand immed...
int getFP32Imm(const APInt &Imm)
getFP32Imm - Return an 8-bit floating-point version of the 32-bit floating-point value.
unsigned getAM2Offset(unsigned AM2Opc)
bool isThumbImmShiftedVal(unsigned V)
isThumbImmShiftedVal - Return true if the specified value can be obtained by left shifting a 8-bit im...
int getT2SOImmVal(unsigned Arg)
getT2SOImmVal - Given a 32-bit immediate, if it is something that can fit into a Thumb-2 shifter_oper...
int getFP64Imm(const APInt &Imm)
getFP64Imm - Return an 8-bit floating-point version of the 64-bit floating-point value.
unsigned createNEONModImm(unsigned OpCmode, unsigned Val)
uint64_t decodeNEONModImm(unsigned ModImm, unsigned &EltBits)
decodeNEONModImm - Decode a NEON modified immediate value into the element value and the element size...
int getFP16Imm(const APInt &Imm)
getFP16Imm - Return an 8-bit floating-point version of the 16-bit floating-point value.
unsigned getSORegOpc(ShiftOpc ShOp, unsigned Imm)
AddrOpc getAM2Op(unsigned AM2Opc)
bool isBitFieldInvertedMask(unsigned v)
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo)
std::underlying_type< E >::type Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ ARM_APCS
ARM_APCS - ARM Procedure Calling Standard calling convention (obsolete, but still used on some target...
@ ARM_AAPCS
ARM_AAPCS - ARM Architecture Procedure Calling Standard calling convention (aka EABI).
@ Fast
Fast - This calling convention attempts to make calls as fast as possible (e.g.
@ ARM_AAPCS_VFP
ARM_AAPCS_VFP - Same as ARM_AAPCS, but uses hard floating point ABI.
@ C
C - The default llvm calling convention, compatible with C.
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ FLT_ROUNDS_
FLT_ROUNDS_ - Returns current rounding mode: -1 Undefined 0 Round to 0 1 Round to nearest 2 Round to ...
@ EH_SJLJ_LONGJMP
OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.longjmp intrinsic.
@ FGETSIGN
INT = FGETSIGN(FP) - Return the sign bit of the specified floating point value as an integer 0/1 valu...
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ BSWAP
Byte Swap and Counting operators.
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val) This corresponds to "store atomic" instruction.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ EH_SJLJ_SETUP_DISPATCH
OUTCHAIN = EH_SJLJ_SETUP_DISPATCH(INCHAIN) The target initializes the dispatch table here.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-preci...
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ SIGN_EXTEND
Conversion operators.
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ ADDCARRY
Carry-using nodes for multiple precision addition and subtraction.
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ SETCCCARRY
Like SetCC, ops #0 and #1 are the LHS and RHS operands to compare, but op #2 is a boolean indicating ...
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ SSUBO
Same for subtraction.
@ BR_JT
BR_JT - Jumptable branch.
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
@ UNDEF
UNDEF - An undefined node.
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR (an vector value) starting with the ...
@ READ_REGISTER
READ_REGISTER, WRITE_REGISTER - This node represents llvm.register on the DAG, which implements the n...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ DEBUGTRAP
DEBUGTRAP - Trap intended to get the attention of a debugger.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ ATOMIC_CMP_SWAP
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo,...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ SMULO
Same for multiplication.
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum or signed or unsigned integers.
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ EH_SJLJ_SETJMP
RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer) This corresponds to the eh.sjlj....
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ BRCOND
BRCOND - Conditional branch.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a vector with the specified, possibly variable,...
bool isNormalStore(const SDNode *N)
Returns true if the specified node is a non-truncating and unindexed store.
bool isZEXTLoad(const SDNode *N)
Returns true if the specified node is a ZEXTLOAD.
bool isEXTLoad(const SDNode *N)
Returns true if the specified node is a EXTLOAD.
CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef.
CondCode getSetCCInverse(CondCode Operation, bool isInteger)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
bool isSEXTLoad(const SDNode *N)
Returns true if the specified node is a SEXTLOAD.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
static const int LAST_INDEXED_MODE
bool isNormalLoad(const SDNode *N)
Returns true if the specified node is a non-extending and unindexed load.
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Flag
These should be considered private to the implementation of the MCInstrDesc class.
bool match(Val *V, const Pattern &P)
match_combine_or< CastClass_match< OpTy, Instruction::ZExt >, CastClass_match< OpTy, Instruction::SExt > > m_ZExtOrSExt(const OpTy &Op)
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Libcall getSINTTOFP(EVT OpVT, EVT RetVT)
getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
Libcall getUINTTOFP(EVT OpVT, EVT RetVT)
getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit.
Libcall getFPTOUINT(EVT OpVT, EVT RetVT)
getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
Libcall getFPTOSINT(EVT OpVT, EVT RetVT)
getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
Libcall getFPEXT(EVT OpVT, EVT RetVT)
getFPEXT - Return the FPEXT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
Libcall getFPROUND(EVT OpVT, EVT RetVT)
getFPROUND - Return the FPROUND_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
@ SingleThread
Synchronized with respect to signal handlers executing in the same thread.
Reg
All possible values of the reg field in the ModR/M byte.
initializer< Ty > init(const Ty &Val)
This class represents lattice values for constants.
void dump(const SparseBitVector< ElementSize > &LHS, raw_ostream &out)
constexpr bool isUInt< 16 >(uint64_t x)
constexpr bool isUInt< 8 >(uint64_t x)
bool CC_ARM_APCS_GHC(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
bool RetCC_ARM_AAPCS_VFP(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
bool operator==(uint64_t V1, const APInt &V2)
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
constexpr bool isMask_32(uint32_t Value)
Return true if the argument is a non-empty sequence of ones starting at the least significant bit wit...
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
bool FastCC_ARM_APCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
bool RetCC_ARM_AAPCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
bool isAcquireOrStronger(AtomicOrdering ao)
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
auto find(R &&Range, const T &Val) -> decltype(adl_begin(Range))
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
static std::array< MachineOperand, 2 > predOps(ARMCC::CondCodes Pred, unsigned PredReg=0)
Get the operands corresponding to the given Pred value.
bool CC_ARM_AAPCS_VFP(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
constexpr bool isShiftedMask_32(uint32_t Value)
Return true if the argument contains a non-empty sequence of ones with the remainder zero (32 bit ver...
bool CC_ARM_APCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
void SplitString(StringRef Source, SmallVectorImpl< StringRef > &OutFragments, StringRef Delimiters=" \t\n\v\f\r")
SplitString - Split up the specified string according to the specified delimiters,...
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
bool RetCC_ARM_APCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
unsigned countLeadingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the most significant bit to the least stopping at the first 1.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
constexpr size_t array_lengthof(T(&)[N])
Find the length of an array.
unsigned countTrailingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the least significant bit to the most stopping at the first 1.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Constant * createSequentialMask(IRBuilder<> &Builder, unsigned Start, unsigned NumInts, unsigned NumUndefs)
Create a sequential shuffle mask.
unsigned countTrailingOnes(T Value, ZeroBehavior ZB=ZB_Width)
Count the number of ones from the least significant bit to the first zero bit.
@ Mod
The access may modify the value stored in memory.
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change.
bool isIntN(unsigned N, int64_t x)
Checks if a signed integer fits into the given (dynamic) bit width.
bool RetFastCC_ARM_APCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
bool CC_ARM_AAPCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
constexpr uint64_t MinAlign(uint64_t A, uint64_t B)
A and B are either alignments or offsets.
auto count_if(R &&Range, UnaryPredicate P) -> typename std::iterator_traits< decltype(adl_begin(Range))>::difference_type
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
raw_ostream & errs()
This returns a reference to a raw_ostream for standard error.
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
AtomicOrdering
Atomic ordering for LLVM's memory model.
static MachineOperand t1CondCodeOp(bool isDead=false)
Get the operand corresponding to the conditional code result for Thumb1.
Value * concatenateVectors(IRBuilder<> &Builder, ArrayRef< Value * > Vecs)
Concatenate a list of vectors.
bool isReleaseOrStronger(AtomicOrdering ao)
bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
static MachineOperand condCodeOp(unsigned CCReg=0)
Get the operand corresponding to the conditional code result.
bool isStrongerThanMonotonic(AtomicOrdering ao)
unsigned convertAddSubFlagsOpcode(unsigned OldOpc)
Map pseudo instructions that imply an 'S' bit onto real opcodes.
bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
unsigned getSizeInBits() const
Return the size of the specified value type in bits.
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
unsigned getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
bool is128BitVector() const
Return true if this is a 128-bit vector type.
static EVT getFloatingPointVT(unsigned BitWidth)
Returns the EVT that represents a floating-point type with the given number of bits.
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
unsigned getScalarSizeInBits() const
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool isInteger() const
Return true if this is an integer or a vector integer type.
bool is64BitVector() const
Return true if this is a 64-bit vector type.
CallLoweringInfo & setCallee(Type *ResultTy, FunctionType *FuncTy, const Value *Target, ArgListTy &&ArgsList, ImmutableCallSite &Call)
unsigned getByValSize() const
unsigned getByValAlign() const
bool isUnknown() const
Returns true if we don't know any bits.
unsigned getBitWidth() const
Get the bit width of this value.
void resetAll()
Resets the known state of all bits.
KnownBits sext(unsigned BitWidth) const
Sign extends the underlying known Zero and One bits.
KnownBits zext(unsigned BitWidth, bool ExtendedBitsAreKnownZero) const
Extends the underlying known Zero and One bits.
This class contains a discriminated union of information about pointers in memory operands,...
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg If BaseGV is null...
This contains information for each constraint that we are lowering.
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setTailCall(bool Value=true)
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
CallLoweringInfo & setChain(SDValue InChain)
void AddToWorklist(SDNode *N)
bool isCalledByLegalizer() const
bool isBeforeLegalize() const
SDValue CombineTo(SDNode *N, ArrayRef< SDValue > To, bool AddTo=true)
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetL...
bool CombineTo(SDValue O, SDValue N)